net: hns3: modify functions of converting speed ability to ethtool link mode
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27
28 #define HCLGE_NAME                      "hclge"
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31
32 #define HCLGE_BUF_SIZE_UNIT     256U
33 #define HCLGE_BUF_MUL_BY        2
34 #define HCLGE_BUF_DIV_BY        2
35 #define NEED_RESERVE_TC_NUM     2
36 #define BUF_MAX_PERCENT         100
37 #define BUF_RESERVE_PERCENT     90
38
39 #define HCLGE_RESET_MAX_FAIL_CNT        5
40 #define HCLGE_RESET_SYNC_TIME           100
41 #define HCLGE_PF_RESET_SYNC_TIME        20
42 #define HCLGE_PF_RESET_SYNC_CNT         1500
43
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET        1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
48 #define HCLGE_DFX_IGU_BD_OFFSET         4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
51 #define HCLGE_DFX_NCSI_BD_OFFSET        7
52 #define HCLGE_DFX_RTC_BD_OFFSET         8
53 #define HCLGE_DFX_PPP_BD_OFFSET         9
54 #define HCLGE_DFX_RCB_BD_OFFSET         10
55 #define HCLGE_DFX_TQP_BD_OFFSET         11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
57
58 #define HCLGE_LINK_STATUS_MS    10
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68                                                    unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75
76 static struct hnae3_ae_algo ae_algo;
77
78 static struct workqueue_struct *hclge_wq;
79
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89         /* required last entry */
90         {0, }
91 };
92
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96                                          HCLGE_NIC_CSQ_BASEADDR_H_REG,
97                                          HCLGE_NIC_CSQ_DEPTH_REG,
98                                          HCLGE_NIC_CSQ_TAIL_REG,
99                                          HCLGE_NIC_CSQ_HEAD_REG,
100                                          HCLGE_NIC_CRQ_BASEADDR_L_REG,
101                                          HCLGE_NIC_CRQ_BASEADDR_H_REG,
102                                          HCLGE_NIC_CRQ_DEPTH_REG,
103                                          HCLGE_NIC_CRQ_TAIL_REG,
104                                          HCLGE_NIC_CRQ_HEAD_REG,
105                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
106                                          HCLGE_CMDQ_INTR_STS_REG,
107                                          HCLGE_CMDQ_INTR_EN_REG,
108                                          HCLGE_CMDQ_INTR_GEN_REG};
109
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111                                            HCLGE_PF_OTHER_INT_REG,
112                                            HCLGE_MISC_RESET_STS_REG,
113                                            HCLGE_MISC_VECTOR_INT_STS,
114                                            HCLGE_GLOBAL_RESET_REG,
115                                            HCLGE_FUN_RST_ING,
116                                            HCLGE_GRO_EN_REG};
117
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119                                          HCLGE_RING_RX_ADDR_H_REG,
120                                          HCLGE_RING_RX_BD_NUM_REG,
121                                          HCLGE_RING_RX_BD_LENGTH_REG,
122                                          HCLGE_RING_RX_MERGE_EN_REG,
123                                          HCLGE_RING_RX_TAIL_REG,
124                                          HCLGE_RING_RX_HEAD_REG,
125                                          HCLGE_RING_RX_FBD_NUM_REG,
126                                          HCLGE_RING_RX_OFFSET_REG,
127                                          HCLGE_RING_RX_FBD_OFFSET_REG,
128                                          HCLGE_RING_RX_STASH_REG,
129                                          HCLGE_RING_RX_BD_ERR_REG,
130                                          HCLGE_RING_TX_ADDR_L_REG,
131                                          HCLGE_RING_TX_ADDR_H_REG,
132                                          HCLGE_RING_TX_BD_NUM_REG,
133                                          HCLGE_RING_TX_PRIORITY_REG,
134                                          HCLGE_RING_TX_TC_REG,
135                                          HCLGE_RING_TX_MERGE_EN_REG,
136                                          HCLGE_RING_TX_TAIL_REG,
137                                          HCLGE_RING_TX_HEAD_REG,
138                                          HCLGE_RING_TX_FBD_NUM_REG,
139                                          HCLGE_RING_TX_OFFSET_REG,
140                                          HCLGE_RING_TX_EBD_NUM_REG,
141                                          HCLGE_RING_TX_EBD_OFFSET_REG,
142                                          HCLGE_RING_TX_BD_ERR_REG,
143                                          HCLGE_RING_EN_REG};
144
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146                                              HCLGE_TQP_INTR_GL0_REG,
147                                              HCLGE_TQP_INTR_GL1_REG,
148                                              HCLGE_TQP_INTR_GL2_REG,
149                                              HCLGE_TQP_INTR_RL_REG};
150
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152         "App    Loopback test",
153         "Serdes serial Loopback test",
154         "Serdes parallel Loopback test",
155         "Phy    Loopback test"
156 };
157
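/* g_mac_stats_string maps each ethtool statistic name to the offset of its
 * counter in struct hclge_mac_stats. The middle field (HCLGE_MAC_STATS_MAX_NUM_V1
 * or _V2) is the minimum mac_stats_num the device must report for the entry to
 * be counted and exported (see hclge_comm_get_count()/hclge_comm_get_stats()).
 */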
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159         {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161         {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163         {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
165         {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
167         {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
169         {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
171         {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
173         {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
175         {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
177         {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
179         {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
181         {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
183         {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
185         {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
187         {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
189         {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
191         {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
193         {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
195         {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
197         {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
199         {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
201         {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
203         {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
205         {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
207         {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
209         {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
211         {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
213         {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
215         {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
217         {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
219         {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
221         {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
223         {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
225         {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
227         {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
229         {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
231         {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
233         {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
235         {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
237         {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
239         {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
241         {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
243         {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
245         {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
247         {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
249         {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
251         {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
253         {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
255         {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
257         {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
259         {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
261         {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
263         {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
265         {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
267         {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
269         {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
271         {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
273         {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
275         {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
277         {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
279         {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
281         {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
283         {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
285         {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
287         {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
289         {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
291         {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
293         {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
295         {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
297         {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
299         {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
301         {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
302                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
303         {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
304                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
305         {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
306                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
307         {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
308                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
309         {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
310                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
311         {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
312                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
313         {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
314                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
315         {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
316                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
317         {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
318                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
319         {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
320                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
321         {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
322                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
323         {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
324                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
325         {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
326                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
327         {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
328                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
329         {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
330                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
331         {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
332                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
333         {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
334                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
335         {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
336                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
337         {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
338                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
339
340         {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
341                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
342         {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
343                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
344         {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
345                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
346         {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
347                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
348         {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
349                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
350         {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
351                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
352         {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
353                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
354         {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
355                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
356         {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
357                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
358         {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
359                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
360         {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
361                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
362         {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
363                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
364 };
365
366 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
367         {
368                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
369                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
370                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
371                 .i_port_bitmap = 0x1,
372         },
373 };
374
375 static const u8 hclge_hash_key[] = {
376         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
377         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
378         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
379         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
380         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
381 };
382
383 static const u32 hclge_dfx_bd_offset_list[] = {
384         HCLGE_DFX_BIOS_BD_OFFSET,
385         HCLGE_DFX_SSU_0_BD_OFFSET,
386         HCLGE_DFX_SSU_1_BD_OFFSET,
387         HCLGE_DFX_IGU_BD_OFFSET,
388         HCLGE_DFX_RPU_0_BD_OFFSET,
389         HCLGE_DFX_RPU_1_BD_OFFSET,
390         HCLGE_DFX_NCSI_BD_OFFSET,
391         HCLGE_DFX_RTC_BD_OFFSET,
392         HCLGE_DFX_PPP_BD_OFFSET,
393         HCLGE_DFX_RCB_BD_OFFSET,
394         HCLGE_DFX_TQP_BD_OFFSET,
395         HCLGE_DFX_SSU_2_BD_OFFSET
396 };
397
398 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
399         HCLGE_OPC_DFX_BIOS_COMMON_REG,
400         HCLGE_OPC_DFX_SSU_REG_0,
401         HCLGE_OPC_DFX_SSU_REG_1,
402         HCLGE_OPC_DFX_IGU_EGU_REG,
403         HCLGE_OPC_DFX_RPU_REG_0,
404         HCLGE_OPC_DFX_RPU_REG_1,
405         HCLGE_OPC_DFX_NCSI_REG,
406         HCLGE_OPC_DFX_RTC_REG,
407         HCLGE_OPC_DFX_PPP_REG,
408         HCLGE_OPC_DFX_RCB_REG,
409         HCLGE_OPC_DFX_TQP_REG,
410         HCLGE_OPC_DFX_SSU_REG_2
411 };
412
413 static const struct key_info meta_data_key_info[] = {
414         { PACKET_TYPE_ID, 6 },
415         { IP_FRAGEMENT, 1 },
416         { ROCE_TYPE, 1 },
417         { NEXT_KEY, 5 },
418         { VLAN_NUMBER, 2 },
419         { SRC_VPORT, 12 },
420         { DST_VPORT, 12 },
421         { TUNNEL_PACKET, 1 },
422 };
423
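/* tuple_key_info describes each flow director tuple: the key width in bits,
 * how the key is encoded (KEY_OPT_*), and the offsets of the tuple value and
 * mask inside struct hclge_fd_rule (-1 when the tuple is not taken from the
 * rule).
 */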
424 static const struct key_info tuple_key_info[] = {
425         { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
426         { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
427         { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
428         { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
429         { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
430         { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
431         { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
432         { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
433         { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
434         { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
435         { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
436         { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
437         { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
438         { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
439         { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
440         { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
441         { INNER_DST_MAC, 48, KEY_OPT_MAC,
442           offsetof(struct hclge_fd_rule, tuples.dst_mac),
443           offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
444         { INNER_SRC_MAC, 48, KEY_OPT_MAC,
445           offsetof(struct hclge_fd_rule, tuples.src_mac),
446           offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
447         { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
448           offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
449           offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
450         { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
451         { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
452           offsetof(struct hclge_fd_rule, tuples.ether_proto),
453           offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
454         { INNER_L2_RSV, 16, KEY_OPT_LE16,
455           offsetof(struct hclge_fd_rule, tuples.l2_user_def),
456           offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
457         { INNER_IP_TOS, 8, KEY_OPT_U8,
458           offsetof(struct hclge_fd_rule, tuples.ip_tos),
459           offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
460         { INNER_IP_PROTO, 8, KEY_OPT_U8,
461           offsetof(struct hclge_fd_rule, tuples.ip_proto),
462           offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
463         { INNER_SRC_IP, 32, KEY_OPT_IP,
464           offsetof(struct hclge_fd_rule, tuples.src_ip),
465           offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
466         { INNER_DST_IP, 32, KEY_OPT_IP,
467           offsetof(struct hclge_fd_rule, tuples.dst_ip),
468           offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
469         { INNER_L3_RSV, 16, KEY_OPT_LE16,
470           offsetof(struct hclge_fd_rule, tuples.l3_user_def),
471           offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
472         { INNER_SRC_PORT, 16, KEY_OPT_LE16,
473           offsetof(struct hclge_fd_rule, tuples.src_port),
474           offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
475         { INNER_DST_PORT, 16, KEY_OPT_LE16,
476           offsetof(struct hclge_fd_rule, tuples.dst_port),
477           offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
478         { INNER_L4_RSV, 32, KEY_OPT_LE32,
479           offsetof(struct hclge_fd_rule, tuples.l4_user_def),
480           offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
481 };
482
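/* Read the MAC statistics with the fixed-size HCLGE_OPC_STATS_MAC command
 * (HCLGE_MAC_CMD_NUM descriptors) and accumulate the returned 64-bit counters
 * into hdev->mac_stats. Used when the firmware does not report mac_stats_num.
 */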
483 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
484 {
485 #define HCLGE_MAC_CMD_NUM 21
486
487         u64 *data = (u64 *)(&hdev->mac_stats);
488         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
489         __le64 *desc_data;
490         u32 data_size;
491         int ret;
492         u32 i;
493
494         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
495         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
496         if (ret) {
497                 dev_err(&hdev->pdev->dev,
498                         "Get MAC pkt stats fail, status = %d.\n", ret);
499
500                 return ret;
501         }
502
503         /* The first desc has a 64-bit header, so the data size is reduced by 1 */
504         data_size = sizeof(desc) / (sizeof(u64)) - 1;
505
506         desc_data = (__le64 *)(&desc[0].data[0]);
507         for (i = 0; i < data_size; i++) {
508                 /* data memory is contiguous because only the first desc has a
509                  * header in this command
510                  */
511                 *data += le64_to_cpu(*desc_data);
512                 data++;
513                 desc_data++;
514         }
515
516         return 0;
517 }
518
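/* Read all MAC statistics with HCLGE_OPC_STATS_MAC_ALL. The number of
 * descriptors is derived from the register count the firmware reported in
 * dev_specs.mac_stats_num, plus one for the 64-bit header in the first desc.
 */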
519 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
520 {
521 #define HCLGE_REG_NUM_PER_DESC          4
522
523         u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
524         u64 *data = (u64 *)(&hdev->mac_stats);
525         struct hclge_desc *desc;
526         __le64 *desc_data;
527         u32 data_size;
528         u32 desc_num;
529         int ret;
530         u32 i;
531
532         /* The first desc has a 64-bit header, so it must be taken into account */
533         desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
534
535         /* This may be called inside atomic sections,
536          * so GFP_ATOMIC is more suitable here
537          */
538         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
539         if (!desc)
540                 return -ENOMEM;
541
542         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
543         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
544         if (ret) {
545                 kfree(desc);
546                 return ret;
547         }
548
549         data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
550
551         desc_data = (__le64 *)(&desc[0].data[0]);
552         for (i = 0; i < data_size; i++) {
553                 /* data memory is contiguous because only the first desc has a
554                  * header in this command
555                  */
556                 *data += le64_to_cpu(*desc_data);
557                 data++;
558                 desc_data++;
559         }
560
561         kfree(desc);
562
563         return 0;
564 }
565
566 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
567 {
568         struct hclge_desc desc;
569         int ret;
570
571         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
572         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
573         if (ret) {
574                 dev_err(&hdev->pdev->dev,
575                         "failed to query mac statistic reg number, ret = %d\n",
576                         ret);
577                 return ret;
578         }
579
580         *reg_num = le32_to_cpu(desc.data[0]);
581         if (*reg_num == 0) {
582                 dev_err(&hdev->pdev->dev,
583                         "mac statistic reg number is invalid!\n");
584                 return -ENODATA;
585         }
586
587         return 0;
588 }
589
590 static int hclge_mac_update_stats(struct hclge_dev *hdev)
591 {
592         /* The firmware supports the new statistics acquisition method */
593         if (hdev->ae_dev->dev_specs.mac_stats_num)
594                 return hclge_mac_update_stats_complete(hdev);
595         else
596                 return hclge_mac_update_stats_defective(hdev);
597 }
598
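/* Query the per-queue RX and TX packet counters of every TQP with
 * HCLGE_OPC_QUERY_RX_STATS / HCLGE_OPC_QUERY_TX_STATS and accumulate them
 * into tqp->tqp_stats.
 */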
599 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
600 {
601         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
602         struct hclge_vport *vport = hclge_get_vport(handle);
603         struct hclge_dev *hdev = vport->back;
604         struct hnae3_queue *queue;
605         struct hclge_desc desc[1];
606         struct hclge_tqp *tqp;
607         int ret, i;
608
609         for (i = 0; i < kinfo->num_tqps; i++) {
610                 queue = handle->kinfo.tqp[i];
611                 tqp = container_of(queue, struct hclge_tqp, q);
612                 /* command : HCLGE_OPC_QUERY_RX_STATS */
613                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
614                                            true);
615
616                 desc[0].data[0] = cpu_to_le32(tqp->index);
617                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
618                 if (ret) {
619                         dev_err(&hdev->pdev->dev,
620                                 "Query tqp stat fail, status = %d,queue = %d\n",
621                                 ret, i);
622                         return ret;
623                 }
624                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
625                         le32_to_cpu(desc[0].data[1]);
626         }
627
628         for (i = 0; i < kinfo->num_tqps; i++) {
629                 queue = handle->kinfo.tqp[i];
630                 tqp = container_of(queue, struct hclge_tqp, q);
631                 /* command : HCLGE_OPC_QUERY_TX_STATS */
632                 hclge_cmd_setup_basic_desc(&desc[0],
633                                            HCLGE_OPC_QUERY_TX_STATS,
634                                            true);
635
636                 desc[0].data[0] = cpu_to_le32(tqp->index);
637                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
638                 if (ret) {
639                         dev_err(&hdev->pdev->dev,
640                                 "Query tqp stat fail, status = %d,queue = %d\n",
641                                 ret, i);
642                         return ret;
643                 }
644                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
645                         le32_to_cpu(desc[0].data[1]);
646         }
647
648         return 0;
649 }
650
651 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
652 {
653         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
654         struct hclge_tqp *tqp;
655         u64 *buff = data;
656         int i;
657
658         for (i = 0; i < kinfo->num_tqps; i++) {
659                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
660                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
661         }
662
663         for (i = 0; i < kinfo->num_tqps; i++) {
664                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
665                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
666         }
667
668         return buff;
669 }
670
671 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
672 {
673         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
674
675         /* each tqp has both a TX and an RX queue */
676         return kinfo->num_tqps * (2);
677 }
678
679 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
680 {
681         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
682         u8 *buff = data;
683         int i;
684
685         for (i = 0; i < kinfo->num_tqps; i++) {
686                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
687                         struct hclge_tqp, q);
688                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
689                          tqp->index);
690                 buff = buff + ETH_GSTRING_LEN;
691         }
692
693         for (i = 0; i < kinfo->num_tqps; i++) {
694                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
695                         struct hclge_tqp, q);
696                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
697                          tqp->index);
698                 buff = buff + ETH_GSTRING_LEN;
699         }
700
701         return buff;
702 }
703
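/* The hclge_comm_get_* helpers walk a hclge_comm_stats_str table and only
 * count/copy entries whose stats_num does not exceed the number of MAC
 * statistics registers supported by the device.
 */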
704 static int hclge_comm_get_count(struct hclge_dev *hdev,
705                                 const struct hclge_comm_stats_str strs[],
706                                 u32 size)
707 {
708         int count = 0;
709         u32 i;
710
711         for (i = 0; i < size; i++)
712                 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
713                         count++;
714
715         return count;
716 }
717
718 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
719                                  const struct hclge_comm_stats_str strs[],
720                                  int size, u64 *data)
721 {
722         u64 *buf = data;
723         u32 i;
724
725         for (i = 0; i < size; i++) {
726                 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
727                         continue;
728
729                 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
730                 buf++;
731         }
732
733         return buf;
734 }
735
736 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
737                                   const struct hclge_comm_stats_str strs[],
738                                   int size, u8 *data)
739 {
740         char *buff = (char *)data;
741         u32 i;
742
743         if (stringset != ETH_SS_STATS)
744                 return buff;
745
746         for (i = 0; i < size; i++) {
747                 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
748                         continue;
749
750                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
751                 buff = buff + ETH_GSTRING_LEN;
752         }
753
754         return (u8 *)buff;
755 }
756
757 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
758 {
759         struct hnae3_handle *handle;
760         int status;
761
762         handle = &hdev->vport[0].nic;
763         if (handle->client) {
764                 status = hclge_tqps_update_stats(handle);
765                 if (status) {
766                         dev_err(&hdev->pdev->dev,
767                                 "Update TQPS stats fail, status = %d.\n",
768                                 status);
769                 }
770         }
771
772         status = hclge_mac_update_stats(hdev);
773         if (status)
774                 dev_err(&hdev->pdev->dev,
775                         "Update MAC stats fail, status = %d.\n", status);
776 }
777
778 static void hclge_update_stats(struct hnae3_handle *handle,
779                                struct net_device_stats *net_stats)
780 {
781         struct hclge_vport *vport = hclge_get_vport(handle);
782         struct hclge_dev *hdev = vport->back;
783         int status;
784
785         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
786                 return;
787
788         status = hclge_mac_update_stats(hdev);
789         if (status)
790                 dev_err(&hdev->pdev->dev,
791                         "Update MAC stats fail, status = %d.\n",
792                         status);
793
794         status = hclge_tqps_update_stats(handle);
795         if (status)
796                 dev_err(&hdev->pdev->dev,
797                         "Update TQPS stats fail, status = %d.\n",
798                         status);
799
800         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
801 }
802
803 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
804 {
805 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
806                 HNAE3_SUPPORT_PHY_LOOPBACK | \
807                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
808                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
809
810         struct hclge_vport *vport = hclge_get_vport(handle);
811         struct hclge_dev *hdev = vport->back;
812         int count = 0;
813
814         /* Loopback test support rules:
815          * mac: only supported in GE mode
816          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
817          * phy: only supported when a phy device exists on the board
818          */
819         if (stringset == ETH_SS_TEST) {
820                 /* clear loopback bit flags at first */
821                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
822                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
823                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
824                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
825                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
826                         count += 1;
827                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
828                 }
829
830                 count += 2;
831                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
832                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
833
834                 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
835                      hdev->hw.mac.phydev->drv->set_loopback) ||
836                     hnae3_dev_phy_imp_supported(hdev)) {
837                         count += 1;
838                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
839                 }
840         } else if (stringset == ETH_SS_STATS) {
841                 count = hclge_comm_get_count(hdev, g_mac_stats_string,
842                                              ARRAY_SIZE(g_mac_stats_string)) +
843                         hclge_tqps_get_sset_count(handle, stringset);
844         }
845
846         return count;
847 }
848
849 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
850                               u8 *data)
851 {
852         struct hclge_vport *vport = hclge_get_vport(handle);
853         struct hclge_dev *hdev = vport->back;
854         u8 *p = (char *)data;
855         int size;
856
857         if (stringset == ETH_SS_STATS) {
858                 size = ARRAY_SIZE(g_mac_stats_string);
859                 p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
860                                            size, p);
861                 p = hclge_tqps_get_strings(handle, p);
862         } else if (stringset == ETH_SS_TEST) {
863                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
864                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
865                                ETH_GSTRING_LEN);
866                         p += ETH_GSTRING_LEN;
867                 }
868                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
869                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
870                                ETH_GSTRING_LEN);
871                         p += ETH_GSTRING_LEN;
872                 }
873                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
874                         memcpy(p,
875                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
876                                ETH_GSTRING_LEN);
877                         p += ETH_GSTRING_LEN;
878                 }
879                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
880                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
881                                ETH_GSTRING_LEN);
882                         p += ETH_GSTRING_LEN;
883                 }
884         }
885 }
886
887 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
888 {
889         struct hclge_vport *vport = hclge_get_vport(handle);
890         struct hclge_dev *hdev = vport->back;
891         u64 *p;
892
893         p = hclge_comm_get_stats(hdev, g_mac_stats_string,
894                                  ARRAY_SIZE(g_mac_stats_string), data);
895         p = hclge_tqps_get_stats(handle, p);
896 }
897
898 static void hclge_get_mac_stat(struct hnae3_handle *handle,
899                                struct hns3_mac_stats *mac_stats)
900 {
901         struct hclge_vport *vport = hclge_get_vport(handle);
902         struct hclge_dev *hdev = vport->back;
903
904         hclge_update_stats(handle, NULL);
905
906         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
907         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
908 }
909
910 static int hclge_parse_func_status(struct hclge_dev *hdev,
911                                    struct hclge_func_status_cmd *status)
912 {
913 #define HCLGE_MAC_ID_MASK       0xF
914
915         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
916                 return -EINVAL;
917
918         /* Record whether this pf is the main pf */
919         if (status->pf_state & HCLGE_PF_STATE_MAIN)
920                 hdev->flag |= HCLGE_FLAG_MAIN;
921         else
922                 hdev->flag &= ~HCLGE_FLAG_MAIN;
923
924         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
925         return 0;
926 }
927
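/* Poll HCLGE_OPC_QUERY_FUNC_STATUS (up to HCLGE_QUERY_MAX_CNT times) until
 * pf_state indicates the PF reset has completed, then record the main-pf flag
 * and MAC id via hclge_parse_func_status().
 */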
928 static int hclge_query_function_status(struct hclge_dev *hdev)
929 {
930 #define HCLGE_QUERY_MAX_CNT     5
931
932         struct hclge_func_status_cmd *req;
933         struct hclge_desc desc;
934         int timeout = 0;
935         int ret;
936
937         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
938         req = (struct hclge_func_status_cmd *)desc.data;
939
940         do {
941                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
942                 if (ret) {
943                         dev_err(&hdev->pdev->dev,
944                                 "query function status failed %d.\n", ret);
945                         return ret;
946                 }
947
948                 /* Check if pf reset is done */
949                 if (req->pf_state)
950                         break;
951                 usleep_range(1000, 2000);
952         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
953
954         return hclge_parse_func_status(hdev, req);
955 }
956
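/* Query the PF resources from firmware: TQP count, packet/TX/DV buffer sizes
 * and MSI vector numbers, and check that at least HNAE3_MIN_VECTOR_NUM NIC
 * vectors are available.
 */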
957 static int hclge_query_pf_resource(struct hclge_dev *hdev)
958 {
959         struct hclge_pf_res_cmd *req;
960         struct hclge_desc desc;
961         int ret;
962
963         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
964         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
965         if (ret) {
966                 dev_err(&hdev->pdev->dev,
967                         "query pf resource failed %d.\n", ret);
968                 return ret;
969         }
970
971         req = (struct hclge_pf_res_cmd *)desc.data;
972         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
973                          le16_to_cpu(req->ext_tqp_num);
974         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
975
976         if (req->tx_buf_size)
977                 hdev->tx_buf_size =
978                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
979         else
980                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
981
982         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
983
984         if (req->dv_buf_size)
985                 hdev->dv_buf_size =
986                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
987         else
988                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
989
990         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
991
992         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
993         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
994                 dev_err(&hdev->pdev->dev,
995                         "only %u msi resources available, not enough for pf(min:2).\n",
996                         hdev->num_nic_msi);
997                 return -EINVAL;
998         }
999
1000         if (hnae3_dev_roce_supported(hdev)) {
1001                 hdev->num_roce_msi =
1002                         le16_to_cpu(req->pf_intr_vector_number_roce);
1003
1004                 /* PF should have both NIC vectors and RoCE vectors;
1005                  * NIC vectors are queued before RoCE vectors.
1006                  */
1007                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
1008         } else {
1009                 hdev->num_msi = hdev->num_nic_msi;
1010         }
1011
1012         return 0;
1013 }
1014
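/* Translate the firmware speed code (HCLGE_FW_MAC_SPEED_*) into the driver's
 * HCLGE_MAC_SPEED_* value; returns -EINVAL for an unknown code.
 */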
1015 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
1016 {
1017         switch (speed_cmd) {
1018         case HCLGE_FW_MAC_SPEED_10M:
1019                 *speed = HCLGE_MAC_SPEED_10M;
1020                 break;
1021         case HCLGE_FW_MAC_SPEED_100M:
1022                 *speed = HCLGE_MAC_SPEED_100M;
1023                 break;
1024         case HCLGE_FW_MAC_SPEED_1G:
1025                 *speed = HCLGE_MAC_SPEED_1G;
1026                 break;
1027         case HCLGE_FW_MAC_SPEED_10G:
1028                 *speed = HCLGE_MAC_SPEED_10G;
1029                 break;
1030         case HCLGE_FW_MAC_SPEED_25G:
1031                 *speed = HCLGE_MAC_SPEED_25G;
1032                 break;
1033         case HCLGE_FW_MAC_SPEED_40G:
1034                 *speed = HCLGE_MAC_SPEED_40G;
1035                 break;
1036         case HCLGE_FW_MAC_SPEED_50G:
1037                 *speed = HCLGE_MAC_SPEED_50G;
1038                 break;
1039         case HCLGE_FW_MAC_SPEED_100G:
1040                 *speed = HCLGE_MAC_SPEED_100G;
1041                 break;
1042         case HCLGE_FW_MAC_SPEED_200G:
1043                 *speed = HCLGE_MAC_SPEED_200G;
1044                 break;
1045         default:
1046                 return -EINVAL;
1047         }
1048
1049         return 0;
1050 }
1051
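/* speed_bit_map pairs each HCLGE_MAC_SPEED_* value with the HCLGE_SUPPORT_*_BIT
 * flag used in the speed_ability bitmap; see hclge_get_speed_bit().
 */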
1052 static const struct hclge_speed_bit_map speed_bit_map[] = {
1053         {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
1054         {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
1055         {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1056         {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1057         {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1058         {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1059         {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1060         {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1061         {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1062 };
1063
1064 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1065 {
1066         u16 i;
1067
1068         for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1069                 if (speed == speed_bit_map[i].speed) {
1070                         *speed_bit = speed_bit_map[i].speed_bit;
1071                         return 0;
1072                 }
1073         }
1074
1075         return -EINVAL;
1076 }
1077
1078 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1079 {
1080         struct hclge_vport *vport = hclge_get_vport(handle);
1081         struct hclge_dev *hdev = vport->back;
1082         u32 speed_ability = hdev->hw.mac.speed_ability;
1083         u32 speed_bit = 0;
1084         int ret;
1085
1086         ret = hclge_get_speed_bit(speed, &speed_bit);
1087         if (ret)
1088                 return ret;
1089
1090         if (speed_bit & speed_ability)
1091                 return 0;
1092
1093         return -EINVAL;
1094 }
1095
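/* The hclge_convert_setting_{sr,lr,cr,kr} helpers below translate the
 * speed_ability bitmap into ethtool link mode bits for the matching media
 * type (SR/LR/CR fiber and copper-cable modes, KR/KX backplane modes).
 */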
1096 static void hclge_convert_setting_sr(u16 speed_ability,
1097                                      unsigned long *link_mode)
1098 {
1099         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1101                                  link_mode);
1102         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1104                                  link_mode);
1105         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1106                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1107                                  link_mode);
1108         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1109                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1110                                  link_mode);
1111         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1112                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1113                                  link_mode);
1114         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1116                                  link_mode);
1117 }
1118
1119 static void hclge_convert_setting_lr(u16 speed_ability,
1120                                      unsigned long *link_mode)
1121 {
1122         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1123                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1124                                  link_mode);
1125         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1126                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1127                                  link_mode);
1128         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1129                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1130                                  link_mode);
1131         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1132                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1133                                  link_mode);
1134         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1135                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1136                                  link_mode);
1137         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1138                 linkmode_set_bit(
1139                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1140                         link_mode);
1141 }
1142
1143 static void hclge_convert_setting_cr(u16 speed_ability,
1144                                      unsigned long *link_mode)
1145 {
1146         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1147                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1148                                  link_mode);
1149         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1150                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1151                                  link_mode);
1152         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1153                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1154                                  link_mode);
1155         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1156                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1157                                  link_mode);
1158         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1159                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1160                                  link_mode);
1161         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1162                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1163                                  link_mode);
1164 }
1165
1166 static void hclge_convert_setting_kr(u16 speed_ability,
1167                                      unsigned long *link_mode)
1168 {
1169         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1171                                  link_mode);
1172         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1173                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1174                                  link_mode);
1175         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1176                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1177                                  link_mode);
1178         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1179                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1180                                  link_mode);
1181         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1182                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1183                                  link_mode);
1184         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1185                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1186                                  link_mode);
1187         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1188                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1189                                  link_mode);
1190 }
1191
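/* Set the supported FEC link modes and mac->fec_ability based on the current
 * MAC speed (BaseR at 10G/40G, RS at 25G/50G/100G/200G; unknown speeds get no
 * FEC ability).
 */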
1192 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1193 {
1194         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1195         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1196
1197         switch (mac->speed) {
1198         case HCLGE_MAC_SPEED_10G:
1199         case HCLGE_MAC_SPEED_40G:
1200                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1201                                  mac->supported);
1202                 mac->fec_ability =
1203                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1204                 break;
1205         case HCLGE_MAC_SPEED_25G:
1206         case HCLGE_MAC_SPEED_50G:
1207                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1208                                  mac->supported);
1209                 mac->fec_ability =
1210                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1211                         BIT(HNAE3_FEC_AUTO);
1212                 break;
1213         case HCLGE_MAC_SPEED_100G:
1214         case HCLGE_MAC_SPEED_200G:
1215                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1216                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1217                 break;
1218         default:
1219                 mac->fec_ability = 0;
1220                 break;
1221         }
1222 }
1223
1224 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1225                                         u16 speed_ability)
1226 {
1227         struct hclge_mac *mac = &hdev->hw.mac;
1228
1229         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1230                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1231                                  mac->supported);
1232
1233         hclge_convert_setting_sr(speed_ability, mac->supported);
1234         hclge_convert_setting_lr(speed_ability, mac->supported);
1235         hclge_convert_setting_cr(speed_ability, mac->supported);
1236         if (hnae3_dev_fec_supported(hdev))
1237                 hclge_convert_setting_fec(mac);
1238
1239         if (hnae3_dev_pause_supported(hdev))
1240                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1241
1242         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1243         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1244 }
1245
1246 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1247                                             u16 speed_ability)
1248 {
1249         struct hclge_mac *mac = &hdev->hw.mac;
1250
1251         hclge_convert_setting_kr(speed_ability, mac->supported);
1252         if (hnae3_dev_fec_supported(hdev))
1253                 hclge_convert_setting_fec(mac);
1254
1255         if (hnae3_dev_pause_supported(hdev))
1256                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1257
1258         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1259         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1260 }
1261
1262 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1263                                          u16 speed_ability)
1264 {
1265         unsigned long *supported = hdev->hw.mac.supported;
1266
1267         /* default to supporting all speeds for a GE port */
1268         if (!speed_ability)
1269                 speed_ability = HCLGE_SUPPORT_GE;
1270
1271         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1272                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1273                                  supported);
1274
1275         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1276                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1277                                  supported);
1278                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1279                                  supported);
1280         }
1281
1282         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1283                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1284                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1285         }
1286
1287         if (hnae3_dev_pause_supported(hdev)) {
1288                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1289                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1290         }
1291
1292         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1293         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1294 }
1295
1296 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1297 {
1298         u8 media_type = hdev->hw.mac.media_type;
1299
1300         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1301                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1302         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1303                 hclge_parse_copper_link_mode(hdev, speed_ability);
1304         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1305                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1306 }
1307
1308 static u32 hclge_get_max_speed(u16 speed_ability)
1309 {
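             /* return the highest speed present in the ability bitmap; default to 1G */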
1310         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1311                 return HCLGE_MAC_SPEED_200G;
1312
1313         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1314                 return HCLGE_MAC_SPEED_100G;
1315
1316         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1317                 return HCLGE_MAC_SPEED_50G;
1318
1319         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1320                 return HCLGE_MAC_SPEED_40G;
1321
1322         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1323                 return HCLGE_MAC_SPEED_25G;
1324
1325         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1326                 return HCLGE_MAC_SPEED_10G;
1327
1328         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1329                 return HCLGE_MAC_SPEED_1G;
1330
1331         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1332                 return HCLGE_MAC_SPEED_100M;
1333
1334         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1335                 return HCLGE_MAC_SPEED_10M;
1336
1337         return HCLGE_MAC_SPEED_1G;
1338 }
1339
1340 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1341 {
1342 #define HCLGE_TX_SPARE_SIZE_UNIT                4096
1343 #define SPEED_ABILITY_EXT_SHIFT                 8
1344
1345         struct hclge_cfg_param_cmd *req;
1346         u64 mac_addr_tmp_high;
1347         u16 speed_ability_ext;
1348         u64 mac_addr_tmp;
1349         unsigned int i;
1350
1351         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1352
1353         /* get the configuration */
1354         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1355                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1356         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1357                                             HCLGE_CFG_TQP_DESC_N_M,
1358                                             HCLGE_CFG_TQP_DESC_N_S);
1359
1360         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1361                                         HCLGE_CFG_PHY_ADDR_M,
1362                                         HCLGE_CFG_PHY_ADDR_S);
1363         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1364                                           HCLGE_CFG_MEDIA_TP_M,
1365                                           HCLGE_CFG_MEDIA_TP_S);
1366         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1367                                           HCLGE_CFG_RX_BUF_LEN_M,
1368                                           HCLGE_CFG_RX_BUF_LEN_S);
1369         /* get mac_address */
1370         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1371         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1372                                             HCLGE_CFG_MAC_ADDR_H_M,
1373                                             HCLGE_CFG_MAC_ADDR_H_S);
1374
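             /* the high 16 bits of the MAC address go into bits 47:32,
              * i.e. param[2] carries bytes 0..3 and the high field bytes 4..5
              */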
1375         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1376
1377         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1378                                              HCLGE_CFG_DEFAULT_SPEED_M,
1379                                              HCLGE_CFG_DEFAULT_SPEED_S);
1380         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1381                                                HCLGE_CFG_RSS_SIZE_M,
1382                                                HCLGE_CFG_RSS_SIZE_S);
1383
1384         for (i = 0; i < ETH_ALEN; i++)
1385                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1386
1387         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1388         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1389
1390         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1391                                              HCLGE_CFG_SPEED_ABILITY_M,
1392                                              HCLGE_CFG_SPEED_ABILITY_S);
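             /* the extended speed ability bits land in the high byte, e.g.
              * speed_ability = 0x00ff with ext = 0x01 becomes 0x01ff
              */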
1393         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1394                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1395                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1396         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1397
1398         cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1399                                                HCLGE_CFG_VLAN_FLTR_CAP_M,
1400                                                HCLGE_CFG_VLAN_FLTR_CAP_S);
1401
1402         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1403                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1404                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1405
1406         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1407                                                HCLGE_CFG_PF_RSS_SIZE_M,
1408                                                HCLGE_CFG_PF_RSS_SIZE_S);
1409
1410         /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, stored as a
1411          * power of 2 rather than as the value itself, which is more
1412          * flexible for future changes and expansions.
1413          * A PF field of 0 is not meaningful on its own; in that case the
1414          * PF reuses the VF max rss size field (HCLGE_CFG_RSS_SIZE_S),
1415          * so PF and VF share the same max rss size.
1416          */
1417         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1418                                1U << cfg->pf_rss_size_max :
1419                                cfg->vf_rss_size_max;
1420
1421         /* The tx spare buffer size queried from the configuration file is
1422          * in units of HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a
1423          * conversion is needed here.
1424          */
1425         cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1426                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1427                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1428         cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1429 }
1430
1431 /* hclge_get_cfg: query the static parameters from flash
1432  * @hdev: pointer to struct hclge_dev
1433  * @hcfg: the config structure to be filled
1434  */
1435 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1436 {
1437         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1438         struct hclge_cfg_param_cmd *req;
1439         unsigned int i;
1440         int ret;
1441
1442         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1443                 u32 offset = 0;
1444
1445                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1446                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1447                                            true);
1448                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1449                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1450                 /* Len must be in units of 4 bytes when sent to hardware */
1451                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1452                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1453                 req->offset = cpu_to_le32(offset);
1454         }
1455
1456         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1457         if (ret) {
1458                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1459                 return ret;
1460         }
1461
1462         hclge_parse_cfg(hcfg, desc);
1463
1464         return 0;
1465 }
1466
1467 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1468 {
1469 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1470
1471         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1472
1473         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1474         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1475         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1476         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1477         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1478         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1479         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1480         ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1481 }
1482
1483 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1484                                   struct hclge_desc *desc)
1485 {
1486         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1487         struct hclge_dev_specs_0_cmd *req0;
1488         struct hclge_dev_specs_1_cmd *req1;
1489
1490         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1491         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1492
1493         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1494         ae_dev->dev_specs.rss_ind_tbl_size =
1495                 le16_to_cpu(req0->rss_ind_tbl_size);
1496         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1497         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1498         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1499         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1500         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1501         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1502         ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1503         ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1504 }
1505
1506 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1507 {
1508         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1509
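             /* firmware may leave a spec as 0; fall back to the driver default */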
1510         if (!dev_specs->max_non_tso_bd_num)
1511                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1512         if (!dev_specs->rss_ind_tbl_size)
1513                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1514         if (!dev_specs->rss_key_size)
1515                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1516         if (!dev_specs->max_tm_rate)
1517                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1518         if (!dev_specs->max_qset_num)
1519                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1520         if (!dev_specs->max_int_gl)
1521                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1522         if (!dev_specs->max_frm_size)
1523                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1524         if (!dev_specs->umv_size)
1525                 dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1526 }
1527
1528 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1529 {
1530         u32 reg_num = 0;
1531         int ret;
1532
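             /* -EOPNOTSUPP is tolerated here: the firmware simply cannot report the count */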
1533         ret = hclge_mac_query_reg_num(hdev, &reg_num);
1534         if (ret && ret != -EOPNOTSUPP)
1535                 return ret;
1536
1537         hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1538         return 0;
1539 }
1540
1541 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1542 {
1543         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1544         int ret;
1545         int i;
1546
1547         ret = hclge_query_mac_stats_num(hdev);
1548         if (ret)
1549                 return ret;
1550
1551         /* set default specifications as devices lower than version V3 do not
1552          * support querying specifications from firmware.
1553          */
1554         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1555                 hclge_set_default_dev_specs(hdev);
1556                 return 0;
1557         }
1558
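             /* every descriptor except the last carries the NEXT flag to chain them */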
1559         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1560                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1561                                            true);
1562                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1563         }
1564         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1565
1566         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1567         if (ret)
1568                 return ret;
1569
1570         hclge_parse_dev_specs(hdev, desc);
1571         hclge_check_dev_specs(hdev);
1572
1573         return 0;
1574 }
1575
1576 static int hclge_get_cap(struct hclge_dev *hdev)
1577 {
1578         int ret;
1579
1580         ret = hclge_query_function_status(hdev);
1581         if (ret) {
1582                 dev_err(&hdev->pdev->dev,
1583                         "query function status error %d.\n", ret);
1584                 return ret;
1585         }
1586
1587         /* get pf resource */
1588         return hclge_query_pf_resource(hdev);
1589 }
1590
1591 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1592 {
1593 #define HCLGE_MIN_TX_DESC       64
1594 #define HCLGE_MIN_RX_DESC       64
1595
1596         if (!is_kdump_kernel())
1597                 return;
1598
1599         dev_info(&hdev->pdev->dev,
1600                  "Running kdump kernel. Using minimal resources\n");
1601
1602         /* the minimum number of queue pairs equals the number of vports */
1603         hdev->num_tqps = hdev->num_req_vfs + 1;
1604         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1605         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1606 }
1607
1608 static int hclge_configure(struct hclge_dev *hdev)
1609 {
1610         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1611         const struct cpumask *cpumask = cpu_online_mask;
1612         struct hclge_cfg cfg;
1613         unsigned int i;
1614         int node, ret;
1615
1616         ret = hclge_get_cfg(hdev, &cfg);
1617         if (ret)
1618                 return ret;
1619
1620         hdev->base_tqp_pid = 0;
1621         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1622         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1623         hdev->rx_buf_len = cfg.rx_buf_len;
1624         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1625         hdev->hw.mac.media_type = cfg.media_type;
1626         hdev->hw.mac.phy_addr = cfg.phy_addr;
1627         hdev->num_tx_desc = cfg.tqp_desc_num;
1628         hdev->num_rx_desc = cfg.tqp_desc_num;
1629         hdev->tm_info.num_pg = 1;
1630         hdev->tc_max = cfg.tc_num;
1631         hdev->tm_info.hw_pfc_map = 0;
1632         if (cfg.umv_space)
1633                 hdev->wanted_umv_size = cfg.umv_space;
1634         else
1635                 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1636         hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1637         hdev->gro_en = true;
1638         if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1639                 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1640
1641         if (hnae3_dev_fd_supported(hdev)) {
1642                 hdev->fd_en = true;
1643                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1644         }
1645
1646         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1647         if (ret) {
1648                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1649                         cfg.default_speed, ret);
1650                 return ret;
1651         }
1652
1653         hclge_parse_link_mode(hdev, cfg.speed_ability);
1654
1655         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1656
1657         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1658             (hdev->tc_max < 1)) {
1659                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1660                          hdev->tc_max);
1661                 hdev->tc_max = 1;
1662         }
1663
1664         /* Dev does not support DCB */
1665         if (!hnae3_dev_dcb_supported(hdev)) {
1666                 hdev->tc_max = 1;
1667                 hdev->pfc_max = 0;
1668         } else {
1669                 hdev->pfc_max = hdev->tc_max;
1670         }
1671
1672         hdev->tm_info.num_tc = 1;
1673
1674         /* Discontiguous TCs are currently not supported */
1675         for (i = 0; i < hdev->tm_info.num_tc; i++)
1676                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1677
1678         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1679
1680         hclge_init_kdump_kernel_config(hdev);
1681
1682         /* Set the affinity based on numa node */
1683         node = dev_to_node(&hdev->pdev->dev);
1684         if (node != NUMA_NO_NODE)
1685                 cpumask = cpumask_of_node(node);
1686
1687         cpumask_copy(&hdev->affinity_mask, cpumask);
1688
1689         return ret;
1690 }
1691
1692 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1693                             u16 tso_mss_max)
1694 {
1695         struct hclge_cfg_tso_status_cmd *req;
1696         struct hclge_desc desc;
1697
1698         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1699
1700         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1701         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1702         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1703
1704         return hclge_cmd_send(&hdev->hw, &desc, 1);
1705 }
1706
1707 static int hclge_config_gro(struct hclge_dev *hdev)
1708 {
1709         struct hclge_cfg_gro_status_cmd *req;
1710         struct hclge_desc desc;
1711         int ret;
1712
1713         if (!hnae3_dev_gro_supported(hdev))
1714                 return 0;
1715
1716         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1717         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1718
1719         req->gro_en = hdev->gro_en ? 1 : 0;
1720
1721         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1722         if (ret)
1723                 dev_err(&hdev->pdev->dev,
1724                         "GRO hardware config cmd failed, ret = %d\n", ret);
1725
1726         return ret;
1727 }
1728
1729 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1730 {
1731         struct hclge_tqp *tqp;
1732         int i;
1733
1734         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1735                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1736         if (!hdev->htqp)
1737                 return -ENOMEM;
1738
1739         tqp = hdev->htqp;
1740
1741         for (i = 0; i < hdev->num_tqps; i++) {
1742                 tqp->dev = &hdev->pdev->dev;
1743                 tqp->index = i;
1744
1745                 tqp->q.ae_algo = &ae_algo;
1746                 tqp->q.buf_size = hdev->rx_buf_len;
1747                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1748                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1749
1750                 /* need an extended offset to configure queues >=
1751                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1752                  */
1753                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1754                         tqp->q.io_base = hdev->hw.io_base +
1755                                          HCLGE_TQP_REG_OFFSET +
1756                                          i * HCLGE_TQP_REG_SIZE;
1757                 else
1758                         tqp->q.io_base = hdev->hw.io_base +
1759                                          HCLGE_TQP_REG_OFFSET +
1760                                          HCLGE_TQP_EXT_REG_OFFSET +
1761                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1762                                          HCLGE_TQP_REG_SIZE;
1763
1764                 tqp++;
1765         }
1766
1767         return 0;
1768 }
1769
1770 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1771                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1772 {
1773         struct hclge_tqp_map_cmd *req;
1774         struct hclge_desc desc;
1775         int ret;
1776
1777         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1778
1779         req = (struct hclge_tqp_map_cmd *)desc.data;
1780         req->tqp_id = cpu_to_le16(tqp_pid);
1781         req->tqp_vf = func_id;
1782         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1783         if (!is_pf)
1784                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1785         req->tqp_vid = cpu_to_le16(tqp_vid);
1786
1787         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1788         if (ret)
1789                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1790
1791         return ret;
1792 }
1793
1794 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1795 {
1796         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1797         struct hclge_dev *hdev = vport->back;
1798         int i, alloced;
1799
1800         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1801              alloced < num_tqps; i++) {
1802                 if (!hdev->htqp[i].alloced) {
1803                         hdev->htqp[i].q.handle = &vport->nic;
1804                         hdev->htqp[i].q.tqp_index = alloced;
1805                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1806                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1807                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1808                         hdev->htqp[i].alloced = true;
1809                         alloced++;
1810                 }
1811         }
1812         vport->alloc_tqps = alloced;
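             /* bound rss_size by the PF max rss size and the queues per TC */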
1813         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1814                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1815
1816         /* ensure a one-to-one mapping between irq and queue by default */
1817         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1818                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1819
1820         return 0;
1821 }
1822
1823 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1824                             u16 num_tx_desc, u16 num_rx_desc)
1825
1826 {
1827         struct hnae3_handle *nic = &vport->nic;
1828         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1829         struct hclge_dev *hdev = vport->back;
1830         int ret;
1831
1832         kinfo->num_tx_desc = num_tx_desc;
1833         kinfo->num_rx_desc = num_rx_desc;
1834
1835         kinfo->rx_buf_len = hdev->rx_buf_len;
1836         kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1837
1838         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1839                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1840         if (!kinfo->tqp)
1841                 return -ENOMEM;
1842
1843         ret = hclge_assign_tqp(vport, num_tqps);
1844         if (ret)
1845                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1846
1847         return ret;
1848 }
1849
1850 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1851                                   struct hclge_vport *vport)
1852 {
1853         struct hnae3_handle *nic = &vport->nic;
1854         struct hnae3_knic_private_info *kinfo;
1855         u16 i;
1856
1857         kinfo = &nic->kinfo;
1858         for (i = 0; i < vport->alloc_tqps; i++) {
1859                 struct hclge_tqp *q =
1860                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1861                 bool is_pf;
1862                 int ret;
1863
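                     /* vport 0 is the PF itself; every other vport maps to a VF */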
1864                 is_pf = !(vport->vport_id);
1865                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1866                                              i, is_pf);
1867                 if (ret)
1868                         return ret;
1869         }
1870
1871         return 0;
1872 }
1873
1874 static int hclge_map_tqp(struct hclge_dev *hdev)
1875 {
1876         struct hclge_vport *vport = hdev->vport;
1877         u16 i, num_vport;
1878
1879         num_vport = hdev->num_req_vfs + 1;
1880         for (i = 0; i < num_vport; i++) {
1881                 int ret;
1882
1883                 ret = hclge_map_tqp_to_vport(hdev, vport);
1884                 if (ret)
1885                         return ret;
1886
1887                 vport++;
1888         }
1889
1890         return 0;
1891 }
1892
1893 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1894 {
1895         struct hnae3_handle *nic = &vport->nic;
1896         struct hclge_dev *hdev = vport->back;
1897         int ret;
1898
1899         nic->pdev = hdev->pdev;
1900         nic->ae_algo = &ae_algo;
1901         nic->numa_node_mask = hdev->numa_node_mask;
1902         nic->kinfo.io_base = hdev->hw.io_base;
1903
1904         ret = hclge_knic_setup(vport, num_tqps,
1905                                hdev->num_tx_desc, hdev->num_rx_desc);
1906         if (ret)
1907                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1908
1909         return ret;
1910 }
1911
1912 static int hclge_alloc_vport(struct hclge_dev *hdev)
1913 {
1914         struct pci_dev *pdev = hdev->pdev;
1915         struct hclge_vport *vport;
1916         u32 tqp_main_vport;
1917         u32 tqp_per_vport;
1918         int num_vport, i;
1919         int ret;
1920
1921         /* We need to alloc a vport for the main NIC of the PF */
1922         num_vport = hdev->num_req_vfs + 1;
1923
1924         if (hdev->num_tqps < num_vport) {
1925                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1926                         hdev->num_tqps, num_vport);
1927                 return -EINVAL;
1928         }
1929
1930         /* Split the TQPs evenly; the main (PF) vport also takes the remainder */
1931         tqp_per_vport = hdev->num_tqps / num_vport;
1932         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1933
1934         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1935                              GFP_KERNEL);
1936         if (!vport)
1937                 return -ENOMEM;
1938
1939         hdev->vport = vport;
1940         hdev->num_alloc_vport = num_vport;
1941
1942         if (IS_ENABLED(CONFIG_PCI_IOV))
1943                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1944
1945         for (i = 0; i < num_vport; i++) {
1946                 vport->back = hdev;
1947                 vport->vport_id = i;
1948                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1949                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1950                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1951                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1952                 vport->req_vlan_fltr_en = true;
1953                 INIT_LIST_HEAD(&vport->vlan_list);
1954                 INIT_LIST_HEAD(&vport->uc_mac_list);
1955                 INIT_LIST_HEAD(&vport->mc_mac_list);
1956                 spin_lock_init(&vport->mac_list_lock);
1957
1958                 if (i == 0)
1959                         ret = hclge_vport_setup(vport, tqp_main_vport);
1960                 else
1961                         ret = hclge_vport_setup(vport, tqp_per_vport);
1962                 if (ret) {
1963                         dev_err(&pdev->dev,
1964                                 "vport setup failed for vport %d, %d\n",
1965                                 i, ret);
1966                         return ret;
1967                 }
1968
1969                 vport++;
1970         }
1971
1972         return 0;
1973 }
1974
1975 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1976                                     struct hclge_pkt_buf_alloc *buf_alloc)
1977 {
1978 /* TX buffer size is in units of 128 bytes */
1979 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1980 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1981         struct hclge_tx_buff_alloc_cmd *req;
1982         struct hclge_desc desc;
1983         int ret;
1984         u8 i;
1985
1986         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1987
1988         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1989         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1990                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1991
1992                 req->tx_pkt_buff[i] =
1993                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1994                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1995         }
1996
1997         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1998         if (ret)
1999                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
2000                         ret);
2001
2002         return ret;
2003 }
2004
2005 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
2006                                  struct hclge_pkt_buf_alloc *buf_alloc)
2007 {
2008         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
2009
2010         if (ret)
2011                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
2012
2013         return ret;
2014 }
2015
2016 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
2017 {
2018         unsigned int i;
2019         u32 cnt = 0;
2020
2021         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2022                 if (hdev->hw_tc_map & BIT(i))
2023                         cnt++;
2024         return cnt;
2025 }
2026
2027 /* Get the number of PFC-enabled TCs that have a private buffer */
2028 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
2029                                   struct hclge_pkt_buf_alloc *buf_alloc)
2030 {
2031         struct hclge_priv_buf *priv;
2032         unsigned int i;
2033         int cnt = 0;
2034
2035         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2036                 priv = &buf_alloc->priv_buf[i];
2037                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
2038                     priv->enable)
2039                         cnt++;
2040         }
2041
2042         return cnt;
2043 }
2044
2045 /* Get the number of PFC-disabled TCs that have a private buffer */
2046 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
2047                                      struct hclge_pkt_buf_alloc *buf_alloc)
2048 {
2049         struct hclge_priv_buf *priv;
2050         unsigned int i;
2051         int cnt = 0;
2052
2053         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2054                 priv = &buf_alloc->priv_buf[i];
2055                 if (hdev->hw_tc_map & BIT(i) &&
2056                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
2057                     priv->enable)
2058                         cnt++;
2059         }
2060
2061         return cnt;
2062 }
2063
2064 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2065 {
2066         struct hclge_priv_buf *priv;
2067         u32 rx_priv = 0;
2068         int i;
2069
2070         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2071                 priv = &buf_alloc->priv_buf[i];
2072                 if (priv->enable)
2073                         rx_priv += priv->buf_size;
2074         }
2075         return rx_priv;
2076 }
2077
2078 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2079 {
2080         u32 i, total_tx_size = 0;
2081
2082         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2083                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2084
2085         return total_tx_size;
2086 }
2087
2088 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2089                                 struct hclge_pkt_buf_alloc *buf_alloc,
2090                                 u32 rx_all)
2091 {
2092         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2093         u32 tc_num = hclge_get_tc_num(hdev);
2094         u32 shared_buf, aligned_mps;
2095         u32 rx_priv;
2096         int i;
2097
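             /* check whether rx_all can hold both the private buffers and the
              * required shared buffer; if so, size the shared buffer and set
              * the per-TC thresholds
              */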
2098         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2099
2100         if (hnae3_dev_dcb_supported(hdev))
2101                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2102                                         hdev->dv_buf_size;
2103         else
2104                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2105                                         + hdev->dv_buf_size;
2106
2107         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2108         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2109                              HCLGE_BUF_SIZE_UNIT);
2110
2111         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2112         if (rx_all < rx_priv + shared_std)
2113                 return false;
2114
2115         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2116         buf_alloc->s_buf.buf_size = shared_buf;
2117         if (hnae3_dev_dcb_supported(hdev)) {
2118                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2119                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2120                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2121                                   HCLGE_BUF_SIZE_UNIT);
2122         } else {
2123                 buf_alloc->s_buf.self.high = aligned_mps +
2124                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
2125                 buf_alloc->s_buf.self.low = aligned_mps;
2126         }
2127
2128         if (hnae3_dev_dcb_supported(hdev)) {
2129                 hi_thrd = shared_buf - hdev->dv_buf_size;
2130
2131                 if (tc_num <= NEED_RESERVE_TC_NUM)
2132                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2133                                         / BUF_MAX_PERCENT;
2134
2135                 if (tc_num)
2136                         hi_thrd = hi_thrd / tc_num;
2137
2138                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2139                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2140                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2141         } else {
2142                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2143                 lo_thrd = aligned_mps;
2144         }
2145
2146         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2147                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2148                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2149         }
2150
2151         return true;
2152 }
2153
2154 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2155                                 struct hclge_pkt_buf_alloc *buf_alloc)
2156 {
2157         u32 i, total_size;
2158
2159         total_size = hdev->pkt_buf_size;
2160
2161         /* alloc tx buffer for all enabled TCs */
2162         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2163                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2164
2165                 if (hdev->hw_tc_map & BIT(i)) {
2166                         if (total_size < hdev->tx_buf_size)
2167                                 return -ENOMEM;
2168
2169                         priv->tx_buf_size = hdev->tx_buf_size;
2170                 } else {
2171                         priv->tx_buf_size = 0;
2172                 }
2173
2174                 total_size -= priv->tx_buf_size;
2175         }
2176
2177         return 0;
2178 }
2179
2180 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2181                                   struct hclge_pkt_buf_alloc *buf_alloc)
2182 {
2183         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2184         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2185         unsigned int i;
2186
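             /* 'max' selects the larger waterline scheme; the caller retries
              * with max = false when this one does not fit
              */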
2187         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2188                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2189
2190                 priv->enable = 0;
2191                 priv->wl.low = 0;
2192                 priv->wl.high = 0;
2193                 priv->buf_size = 0;
2194
2195                 if (!(hdev->hw_tc_map & BIT(i)))
2196                         continue;
2197
2198                 priv->enable = 1;
2199
2200                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2201                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2202                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2203                                                 HCLGE_BUF_SIZE_UNIT);
2204                 } else {
2205                         priv->wl.low = 0;
2206                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2207                                         aligned_mps;
2208                 }
2209
2210                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2211         }
2212
2213         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2214 }
2215
2216 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2217                                           struct hclge_pkt_buf_alloc *buf_alloc)
2218 {
2219         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2220         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2221         int i;
2222
2223         /* clear the highest-numbered TCs first */
2224         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2225                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2226                 unsigned int mask = BIT((unsigned int)i);
2227
2228                 if (hdev->hw_tc_map & mask &&
2229                     !(hdev->tm_info.hw_pfc_map & mask)) {
2230                         /* Clear the private buffer of this non-PFC TC */
2231                         priv->wl.low = 0;
2232                         priv->wl.high = 0;
2233                         priv->buf_size = 0;
2234                         priv->enable = 0;
2235                         no_pfc_priv_num--;
2236                 }
2237
2238                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2239                     no_pfc_priv_num == 0)
2240                         break;
2241         }
2242
2243         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2244 }
2245
2246 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2247                                         struct hclge_pkt_buf_alloc *buf_alloc)
2248 {
2249         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2250         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2251         int i;
2252
2253         /* clear the highest-numbered TCs first */
2254         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2255                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2256                 unsigned int mask = BIT((unsigned int)i);
2257
2258                 if (hdev->hw_tc_map & mask &&
2259                     hdev->tm_info.hw_pfc_map & mask) {
2260                         /* Reduce the number of PFC TCs with a private buffer */
2261                         priv->wl.low = 0;
2262                         priv->enable = 0;
2263                         priv->wl.high = 0;
2264                         priv->buf_size = 0;
2265                         pfc_priv_num--;
2266                 }
2267
2268                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2269                     pfc_priv_num == 0)
2270                         break;
2271         }
2272
2273         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2274 }
2275
2276 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2277                                        struct hclge_pkt_buf_alloc *buf_alloc)
2278 {
2279 #define COMPENSATE_BUFFER       0x3C00
2280 #define COMPENSATE_HALF_MPS_NUM 5
2281 #define PRIV_WL_GAP             0x1800
2282
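             /* try to split the whole rx packet buffer into per-TC private
              * buffers, leaving no shared buffer
              */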
2283         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2284         u32 tc_num = hclge_get_tc_num(hdev);
2285         u32 half_mps = hdev->mps >> 1;
2286         u32 min_rx_priv;
2287         unsigned int i;
2288
2289         if (tc_num)
2290                 rx_priv = rx_priv / tc_num;
2291
2292         if (tc_num <= NEED_RESERVE_TC_NUM)
2293                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2294
2295         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2296                         COMPENSATE_HALF_MPS_NUM * half_mps;
2297         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2298         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2299         if (rx_priv < min_rx_priv)
2300                 return false;
2301
2302         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2303                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2304
2305                 priv->enable = 0;
2306                 priv->wl.low = 0;
2307                 priv->wl.high = 0;
2308                 priv->buf_size = 0;
2309
2310                 if (!(hdev->hw_tc_map & BIT(i)))
2311                         continue;
2312
2313                 priv->enable = 1;
2314                 priv->buf_size = rx_priv;
2315                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2316                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2317         }
2318
2319         buf_alloc->s_buf.buf_size = 0;
2320
2321         return true;
2322 }
2323
2324 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2325  * @hdev: pointer to struct hclge_dev
2326  * @buf_alloc: pointer to buffer calculation data
2327  * @return: 0 on success, a negative error code on failure
2328  */
2329 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2330                                 struct hclge_pkt_buf_alloc *buf_alloc)
2331 {
2332         /* When DCB is not supported, rx private buffer is not allocated. */
2333         if (!hnae3_dev_dcb_supported(hdev)) {
2334                 u32 rx_all = hdev->pkt_buf_size;
2335
2336                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2337                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2338                         return -ENOMEM;
2339
2340                 return 0;
2341         }
2342
2343         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2344                 return 0;
2345
2346         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2347                 return 0;
2348
2349         /* try to decrease the buffer size */
2350         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2351                 return 0;
2352
2353         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2354                 return 0;
2355
2356         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2357                 return 0;
2358
2359         return -ENOMEM;
2360 }
2361
2362 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2363                                    struct hclge_pkt_buf_alloc *buf_alloc)
2364 {
2365         struct hclge_rx_priv_buff_cmd *req;
2366         struct hclge_desc desc;
2367         int ret;
2368         int i;
2369
2370         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2371         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2372
2373         /* Alloc private buffers for all TCs */
2374         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2375                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2376
2377                 req->buf_num[i] =
2378                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2379                 req->buf_num[i] |=
2380                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2381         }
2382
2383         req->shared_buf =
2384                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2385                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2386
2387         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2388         if (ret)
2389                 dev_err(&hdev->pdev->dev,
2390                         "rx private buffer alloc cmd failed %d\n", ret);
2391
2392         return ret;
2393 }
2394
2395 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2396                                    struct hclge_pkt_buf_alloc *buf_alloc)
2397 {
2398         struct hclge_rx_priv_wl_buf *req;
2399         struct hclge_priv_buf *priv;
2400         struct hclge_desc desc[2];
2401         int i, j;
2402         int ret;
2403
2404         for (i = 0; i < 2; i++) {
2405                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2406                                            false);
2407                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2408
2409                 /* The first descriptor sets the NEXT bit to 1 */
2410                 if (i == 0)
2411                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2412                 else
2413                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2414
2415                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2416                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2417
2418                         priv = &buf_alloc->priv_buf[idx];
2419                         req->tc_wl[j].high =
2420                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2421                         req->tc_wl[j].high |=
2422                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2423                         req->tc_wl[j].low =
2424                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2425                         req->tc_wl[j].low |=
2426                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2427                 }
2428         }
2429
2430         /* Send 2 descriptors at one time */
2431         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2432         if (ret)
2433                 dev_err(&hdev->pdev->dev,
2434                         "rx private waterline config cmd failed %d\n",
2435                         ret);
2436         return ret;
2437 }
2438
2439 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2440                                     struct hclge_pkt_buf_alloc *buf_alloc)
2441 {
2442         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2443         struct hclge_rx_com_thrd *req;
2444         struct hclge_desc desc[2];
2445         struct hclge_tc_thrd *tc;
2446         int i, j;
2447         int ret;
2448
2449         for (i = 0; i < 2; i++) {
2450                 hclge_cmd_setup_basic_desc(&desc[i],
2451                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2452                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2453
2454                 /* The first descriptor sets the NEXT bit to 1 */
2455                 if (i == 0)
2456                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2457                 else
2458                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2459
2460                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2461                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2462
2463                         req->com_thrd[j].high =
2464                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2465                         req->com_thrd[j].high |=
2466                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2467                         req->com_thrd[j].low =
2468                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2469                         req->com_thrd[j].low |=
2470                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2471                 }
2472         }
2473
2474         /* Send 2 descriptors at one time */
2475         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2476         if (ret)
2477                 dev_err(&hdev->pdev->dev,
2478                         "common threshold config cmd failed %d\n", ret);
2479         return ret;
2480 }
2481
2482 static int hclge_common_wl_config(struct hclge_dev *hdev,
2483                                   struct hclge_pkt_buf_alloc *buf_alloc)
2484 {
2485         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2486         struct hclge_rx_com_wl *req;
2487         struct hclge_desc desc;
2488         int ret;
2489
2490         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2491
2492         req = (struct hclge_rx_com_wl *)desc.data;
2493         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2494         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2495
2496         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2497         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2498
2499         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2500         if (ret)
2501                 dev_err(&hdev->pdev->dev,
2502                         "common waterline config cmd failed %d\n", ret);
2503
2504         return ret;
2505 }
2506
2507 int hclge_buffer_alloc(struct hclge_dev *hdev)
2508 {
2509         struct hclge_pkt_buf_alloc *pkt_buf;
2510         int ret;
2511
2512         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2513         if (!pkt_buf)
2514                 return -ENOMEM;
2515
2516         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2517         if (ret) {
2518                 dev_err(&hdev->pdev->dev,
2519                         "could not calc tx buffer size for all TCs %d\n", ret);
2520                 goto out;
2521         }
2522
2523         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2524         if (ret) {
2525                 dev_err(&hdev->pdev->dev,
2526                         "could not alloc tx buffers %d\n", ret);
2527                 goto out;
2528         }
2529
2530         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2531         if (ret) {
2532                 dev_err(&hdev->pdev->dev,
2533                         "could not calc rx priv buffer size for all TCs %d\n",
2534                         ret);
2535                 goto out;
2536         }
2537
2538         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2539         if (ret) {
2540                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2541                         ret);
2542                 goto out;
2543         }
2544
2545         if (hnae3_dev_dcb_supported(hdev)) {
2546                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2547                 if (ret) {
2548                         dev_err(&hdev->pdev->dev,
2549                                 "could not configure rx private waterline %d\n",
2550                                 ret);
2551                         goto out;
2552                 }
2553
2554                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2555                 if (ret) {
2556                         dev_err(&hdev->pdev->dev,
2557                                 "could not configure common threshold %d\n",
2558                                 ret);
2559                         goto out;
2560                 }
2561         }
2562
2563         ret = hclge_common_wl_config(hdev, pkt_buf);
2564         if (ret)
2565                 dev_err(&hdev->pdev->dev,
2566                         "could not configure common waterline %d\n", ret);
2567
2568 out:
2569         kfree(pkt_buf);
2570         return ret;
2571 }
2572
2573 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2574 {
2575         struct hnae3_handle *roce = &vport->roce;
2576         struct hnae3_handle *nic = &vport->nic;
2577         struct hclge_dev *hdev = vport->back;
2578
2579         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2580
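             /* RoCE vectors sit after the NIC vectors, so both sets must fit
              * within the allocated MSI vectors
              */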
2581         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2582                 return -EINVAL;
2583
2584         roce->rinfo.base_vector = hdev->roce_base_vector;
2585
2586         roce->rinfo.netdev = nic->kinfo.netdev;
2587         roce->rinfo.roce_io_base = hdev->hw.io_base;
2588         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2589
2590         roce->pdev = nic->pdev;
2591         roce->ae_algo = nic->ae_algo;
2592         roce->numa_node_mask = nic->numa_node_mask;
2593
2594         return 0;
2595 }
2596
2597 static int hclge_init_msi(struct hclge_dev *hdev)
2598 {
2599         struct pci_dev *pdev = hdev->pdev;
2600         int vectors;
2601         int i;
2602
2603         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2604                                         hdev->num_msi,
2605                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2606         if (vectors < 0) {
2607                 dev_err(&pdev->dev,
2608                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2609                         vectors);
2610                 return vectors;
2611         }
2612         if (vectors < hdev->num_msi)
2613                 dev_warn(&hdev->pdev->dev,
2614                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2615                          hdev->num_msi, vectors);
2616
2617         hdev->num_msi = vectors;
2618         hdev->num_msi_left = vectors;
2619
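        /* the RoCE vectors are laid out right after the NIC vectors in the
         * allocated MSI/MSI-X range
         */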
2620         hdev->base_msi_vector = pdev->irq;
2621         hdev->roce_base_vector = hdev->base_msi_vector +
2622                                 hdev->num_nic_msi;
2623
2624         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2625                                            sizeof(u16), GFP_KERNEL);
2626         if (!hdev->vector_status) {
2627                 pci_free_irq_vectors(pdev);
2628                 return -ENOMEM;
2629         }
2630
2631         for (i = 0; i < hdev->num_msi; i++)
2632                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2633
2634         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2635                                         sizeof(int), GFP_KERNEL);
2636         if (!hdev->vector_irq) {
2637                 pci_free_irq_vectors(pdev);
2638                 return -ENOMEM;
2639         }
2640
2641         return 0;
2642 }
2643
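/* half duplex is only meaningful for 10M/100M links; force full duplex for
 * every higher speed
 */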
2644 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2645 {
2646         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2647                 duplex = HCLGE_MAC_FULL;
2648
2649         return duplex;
2650 }
2651
2652 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2653                                       u8 duplex)
2654 {
2655         struct hclge_config_mac_speed_dup_cmd *req;
2656         struct hclge_desc desc;
2657         int ret;
2658
2659         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2660
2661         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2662
2663         if (duplex)
2664                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2665
2666         switch (speed) {
2667         case HCLGE_MAC_SPEED_10M:
2668                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2669                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2670                 break;
2671         case HCLGE_MAC_SPEED_100M:
2672                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2673                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2674                 break;
2675         case HCLGE_MAC_SPEED_1G:
2676                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2677                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2678                 break;
2679         case HCLGE_MAC_SPEED_10G:
2680                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2681                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2682                 break;
2683         case HCLGE_MAC_SPEED_25G:
2684                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2685                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2686                 break;
2687         case HCLGE_MAC_SPEED_40G:
2688                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2689                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2690                 break;
2691         case HCLGE_MAC_SPEED_50G:
2692                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2693                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2694                 break;
2695         case HCLGE_MAC_SPEED_100G:
2696                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2697                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2698                 break;
2699         case HCLGE_MAC_SPEED_200G:
2700                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2701                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2702                 break;
2703         default:
2704                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2705                 return -EINVAL;
2706         }
2707
2708         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2709                       1);
2710
2711         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2712         if (ret) {
2713                 dev_err(&hdev->pdev->dev,
2714                         "mac speed/duplex config cmd failed %d.\n", ret);
2715                 return ret;
2716         }
2717
2718         return 0;
2719 }
2720
2721 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2722 {
2723         struct hclge_mac *mac = &hdev->hw.mac;
2724         int ret;
2725
2726         duplex = hclge_check_speed_dup(duplex, speed);
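        /* when autoneg is not supported and the requested speed/duplex already
         * match the current MAC setting, there is nothing to configure
         */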
2727         if (!mac->support_autoneg && mac->speed == speed &&
2728             mac->duplex == duplex)
2729                 return 0;
2730
2731         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2732         if (ret)
2733                 return ret;
2734
2735         hdev->hw.mac.speed = speed;
2736         hdev->hw.mac.duplex = duplex;
2737
2738         return 0;
2739 }
2740
2741 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2742                                      u8 duplex)
2743 {
2744         struct hclge_vport *vport = hclge_get_vport(handle);
2745         struct hclge_dev *hdev = vport->back;
2746
2747         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2748 }
2749
2750 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2751 {
2752         struct hclge_config_auto_neg_cmd *req;
2753         struct hclge_desc desc;
2754         u32 flag = 0;
2755         int ret;
2756
2757         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2758
2759         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2760         if (enable)
2761                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2762         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2763
2764         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2765         if (ret)
2766                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2767                         ret);
2768
2769         return ret;
2770 }
2771
2772 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2773 {
2774         struct hclge_vport *vport = hclge_get_vport(handle);
2775         struct hclge_dev *hdev = vport->back;
2776
2777         if (!hdev->hw.mac.support_autoneg) {
2778                 if (enable) {
2779                         dev_err(&hdev->pdev->dev,
2780                                 "autoneg is not supported by current port\n");
2781                         return -EOPNOTSUPP;
2782                 } else {
2783                         return 0;
2784                 }
2785         }
2786
2787         return hclge_set_autoneg_en(hdev, enable);
2788 }
2789
2790 static int hclge_get_autoneg(struct hnae3_handle *handle)
2791 {
2792         struct hclge_vport *vport = hclge_get_vport(handle);
2793         struct hclge_dev *hdev = vport->back;
2794         struct phy_device *phydev = hdev->hw.mac.phydev;
2795
2796         if (phydev)
2797                 return phydev->autoneg;
2798
2799         return hdev->hw.mac.autoneg;
2800 }
2801
2802 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2803 {
2804         struct hclge_vport *vport = hclge_get_vport(handle);
2805         struct hclge_dev *hdev = vport->back;
2806         int ret;
2807
2808         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2809
2810         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2811         if (ret)
2812                 return ret;
2813         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2814 }
2815
2816 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2817 {
2818         struct hclge_vport *vport = hclge_get_vport(handle);
2819         struct hclge_dev *hdev = vport->back;
2820
2821         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2822                 return hclge_set_autoneg_en(hdev, !halt);
2823
2824         return 0;
2825 }
2826
2827 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2828 {
2829         struct hclge_config_fec_cmd *req;
2830         struct hclge_desc desc;
2831         int ret;
2832
2833         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2834
2835         req = (struct hclge_config_fec_cmd *)desc.data;
2836         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2837                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2838         if (fec_mode & BIT(HNAE3_FEC_RS))
2839                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2840                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2841         if (fec_mode & BIT(HNAE3_FEC_BASER))
2842                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2843                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2844
2845         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2846         if (ret)
2847                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2848
2849         return ret;
2850 }
2851
2852 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2853 {
2854         struct hclge_vport *vport = hclge_get_vport(handle);
2855         struct hclge_dev *hdev = vport->back;
2856         struct hclge_mac *mac = &hdev->hw.mac;
2857         int ret;
2858
2859         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2860                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2861                 return -EINVAL;
2862         }
2863
2864         ret = hclge_set_fec_hw(hdev, fec_mode);
2865         if (ret)
2866                 return ret;
2867
2868         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2869         return 0;
2870 }
2871
2872 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2873                           u8 *fec_mode)
2874 {
2875         struct hclge_vport *vport = hclge_get_vport(handle);
2876         struct hclge_dev *hdev = vport->back;
2877         struct hclge_mac *mac = &hdev->hw.mac;
2878
2879         if (fec_ability)
2880                 *fec_ability = mac->fec_ability;
2881         if (fec_mode)
2882                 *fec_mode = mac->fec_mode;
2883 }
2884
2885 static int hclge_mac_init(struct hclge_dev *hdev)
2886 {
2887         struct hclge_mac *mac = &hdev->hw.mac;
2888         int ret;
2889
2890         hdev->support_sfp_query = true;
2891         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2892         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2893                                          hdev->hw.mac.duplex);
2894         if (ret)
2895                 return ret;
2896
2897         if (hdev->hw.mac.support_autoneg) {
2898                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2899                 if (ret)
2900                         return ret;
2901         }
2902
2903         mac->link = 0;
2904
2905         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2906                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2907                 if (ret)
2908                         return ret;
2909         }
2910
2911         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2912         if (ret) {
2913                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2914                 return ret;
2915         }
2916
2917         ret = hclge_set_default_loopback(hdev);
2918         if (ret)
2919                 return ret;
2920
2921         ret = hclge_buffer_alloc(hdev);
2922         if (ret)
2923                 dev_err(&hdev->pdev->dev,
2924                         "allocate buffer fail, ret=%d\n", ret);
2925
2926         return ret;
2927 }
2928
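/* The mailbox, reset, error-handling and periodic tasks below all share the
 * same delayed service work: scheduling queues hdev->service_task on the
 * first CPU in the misc vector's affinity mask with the requested delay.
 */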
2929 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2930 {
2931         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2932             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2933                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2934                                     hclge_wq, &hdev->service_task, 0);
2935 }
2936
2937 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2938 {
2939         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2940             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2941                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2942                                     hclge_wq, &hdev->service_task, 0);
2943 }
2944
2945 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2946 {
2947         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2948             !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2949                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2950                                     hclge_wq, &hdev->service_task, 0);
2951 }
2952
2953 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2954 {
2955         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2956             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2957                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2958                                     hclge_wq, &hdev->service_task,
2959                                     delay_time);
2960 }
2961
2962 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2963 {
2964         struct hclge_link_status_cmd *req;
2965         struct hclge_desc desc;
2966         int ret;
2967
2968         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2969         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2970         if (ret) {
2971                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2972                         ret);
2973                 return ret;
2974         }
2975
2976         req = (struct hclge_link_status_cmd *)desc.data;
2977         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2978                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2979
2980         return 0;
2981 }
2982
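/* report the link as down while the device is down or while an attached PHY
 * is not up and running; otherwise query the MAC link status from firmware
 */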
2983 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2984 {
2985         struct phy_device *phydev = hdev->hw.mac.phydev;
2986
2987         *link_status = HCLGE_LINK_STATUS_DOWN;
2988
2989         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2990                 return 0;
2991
2992         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2993                 return 0;
2994
2995         return hclge_get_mac_link_status(hdev, link_status);
2996 }
2997
2998 static void hclge_push_link_status(struct hclge_dev *hdev)
2999 {
3000         struct hclge_vport *vport;
3001         int ret;
3002         u16 i;
3003
3004         for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3005                 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3006
3007                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3008                     vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3009                         continue;
3010
3011                 ret = hclge_push_vf_link_status(vport);
3012                 if (ret) {
3013                         dev_err(&hdev->pdev->dev,
3014                                 "failed to push link status to vf%u, ret = %d\n",
3015                                 i, ret);
3016                 }
3017         }
3018 }
3019
3020 static void hclge_update_link_status(struct hclge_dev *hdev)
3021 {
3022         struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3023         struct hnae3_handle *handle = &hdev->vport[0].nic;
3024         struct hnae3_client *rclient = hdev->roce_client;
3025         struct hnae3_client *client = hdev->nic_client;
3026         int state;
3027         int ret;
3028
3029         if (!client)
3030                 return;
3031
3032         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3033                 return;
3034
3035         ret = hclge_get_mac_phy_link(hdev, &state);
3036         if (ret) {
3037                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3038                 return;
3039         }
3040
3041         if (state != hdev->hw.mac.link) {
3042                 hdev->hw.mac.link = state;
3043                 client->ops->link_status_change(handle, state);
3044                 hclge_config_mac_tnl_int(hdev, state);
3045                 if (rclient && rclient->ops->link_status_change)
3046                         rclient->ops->link_status_change(rhandle, state);
3047
3048                 hclge_push_link_status(hdev);
3049         }
3050
3051         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3052 }
3053
3054 static void hclge_update_port_capability(struct hclge_dev *hdev,
3055                                          struct hclge_mac *mac)
3056 {
3057         if (hnae3_dev_fec_supported(hdev))
3058                 /* update fec ability by speed */
3059                 hclge_convert_setting_fec(mac);
3060
3061         /* firmware can not identify the backplane type, so the media type
3062          * read from the configuration is used to handle it
3063          */
3064         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3065             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3066                 mac->module_type = HNAE3_MODULE_TYPE_KR;
3067         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3068                 mac->module_type = HNAE3_MODULE_TYPE_TP;
3069
3070         if (mac->support_autoneg) {
3071                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3072                 linkmode_copy(mac->advertising, mac->supported);
3073         } else {
3074                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3075                                    mac->supported);
3076                 linkmode_zero(mac->advertising);
3077         }
3078 }
3079
3080 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3081 {
3082         struct hclge_sfp_info_cmd *resp;
3083         struct hclge_desc desc;
3084         int ret;
3085
3086         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3087         resp = (struct hclge_sfp_info_cmd *)desc.data;
3088         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3089         if (ret == -EOPNOTSUPP) {
3090                 dev_warn(&hdev->pdev->dev,
3091                          "IMP does not support get SFP speed %d\n", ret);
3092                 return ret;
3093         } else if (ret) {
3094                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3095                 return ret;
3096         }
3097
3098         *speed = le32_to_cpu(resp->speed);
3099
3100         return 0;
3101 }
3102
3103 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3104 {
3105         struct hclge_sfp_info_cmd *resp;
3106         struct hclge_desc desc;
3107         int ret;
3108
3109         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3110         resp = (struct hclge_sfp_info_cmd *)desc.data;
3111
3112         resp->query_type = QUERY_ACTIVE_SPEED;
3113
3114         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3115         if (ret == -EOPNOTSUPP) {
3116                 dev_warn(&hdev->pdev->dev,
3117                          "IMP does not support get SFP info %d\n", ret);
3118                 return ret;
3119         } else if (ret) {
3120                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3121                 return ret;
3122         }
3123
3124         /* In some cases, the mac speed queried from the IMP may be 0, and
3125          * it should not be written to mac->speed.
3126          */
3127         if (!le32_to_cpu(resp->speed))
3128                 return 0;
3129
3130         mac->speed = le32_to_cpu(resp->speed);
3131         /* if resp->speed_ability is 0, the firmware is an old version,
3132          * so do not update these parameters
3133          */
3134         if (resp->speed_ability) {
3135                 mac->module_type = le32_to_cpu(resp->module_type);
3136                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3137                 mac->autoneg = resp->autoneg;
3138                 mac->support_autoneg = resp->autoneg_ability;
3139                 mac->speed_type = QUERY_ACTIVE_SPEED;
3140                 if (!resp->active_fec)
3141                         mac->fec_mode = 0;
3142                 else
3143                         mac->fec_mode = BIT(resp->active_fec);
3144         } else {
3145                 mac->speed_type = QUERY_SFP_SPEED;
3146         }
3147
3148         return 0;
3149 }
3150
3151 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3152                                         struct ethtool_link_ksettings *cmd)
3153 {
3154         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3155         struct hclge_vport *vport = hclge_get_vport(handle);
3156         struct hclge_phy_link_ksetting_0_cmd *req0;
3157         struct hclge_phy_link_ksetting_1_cmd *req1;
3158         u32 supported, advertising, lp_advertising;
3159         struct hclge_dev *hdev = vport->back;
3160         int ret;
3161
3162         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3163                                    true);
3164         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3165         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3166                                    true);
3167
3168         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3169         if (ret) {
3170                 dev_err(&hdev->pdev->dev,
3171                         "failed to get phy link ksetting, ret = %d.\n", ret);
3172                 return ret;
3173         }
3174
3175         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3176         cmd->base.autoneg = req0->autoneg;
3177         cmd->base.speed = le32_to_cpu(req0->speed);
3178         cmd->base.duplex = req0->duplex;
3179         cmd->base.port = req0->port;
3180         cmd->base.transceiver = req0->transceiver;
3181         cmd->base.phy_address = req0->phy_address;
3182         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3183         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3184         supported = le32_to_cpu(req0->supported);
3185         advertising = le32_to_cpu(req0->advertising);
3186         lp_advertising = le32_to_cpu(req0->lp_advertising);
3187         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3188                                                 supported);
3189         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3190                                                 advertising);
3191         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3192                                                 lp_advertising);
3193
3194         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3195         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3196         cmd->base.master_slave_state = req1->master_slave_state;
3197
3198         return 0;
3199 }
3200
3201 static int
3202 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3203                              const struct ethtool_link_ksettings *cmd)
3204 {
3205         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3206         struct hclge_vport *vport = hclge_get_vport(handle);
3207         struct hclge_phy_link_ksetting_0_cmd *req0;
3208         struct hclge_phy_link_ksetting_1_cmd *req1;
3209         struct hclge_dev *hdev = vport->back;
3210         u32 advertising;
3211         int ret;
3212
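        /* when autoneg is disabled, only a forced speed of 10M or 100M with
         * half or full duplex is accepted
         */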
3213         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3214             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3215              (cmd->base.duplex != DUPLEX_HALF &&
3216               cmd->base.duplex != DUPLEX_FULL)))
3217                 return -EINVAL;
3218
3219         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3220                                    false);
3221         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3222         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3223                                    false);
3224
3225         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3226         req0->autoneg = cmd->base.autoneg;
3227         req0->speed = cpu_to_le32(cmd->base.speed);
3228         req0->duplex = cmd->base.duplex;
3229         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3230                                                 cmd->link_modes.advertising);
3231         req0->advertising = cpu_to_le32(advertising);
3232         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3233
3234         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3235         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3236
3237         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3238         if (ret) {
3239                 dev_err(&hdev->pdev->dev,
3240                         "failed to set phy link ksettings, ret = %d.\n", ret);
3241                 return ret;
3242         }
3243
3244         hdev->hw.mac.autoneg = cmd->base.autoneg;
3245         hdev->hw.mac.speed = cmd->base.speed;
3246         hdev->hw.mac.duplex = cmd->base.duplex;
3247         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3248
3249         return 0;
3250 }
3251
3252 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3253 {
3254         struct ethtool_link_ksettings cmd;
3255         int ret;
3256
3257         if (!hnae3_dev_phy_imp_supported(hdev))
3258                 return 0;
3259
3260         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3261         if (ret)
3262                 return ret;
3263
3264         hdev->hw.mac.autoneg = cmd.base.autoneg;
3265         hdev->hw.mac.speed = cmd.base.speed;
3266         hdev->hw.mac.duplex = cmd.base.duplex;
3267
3268         return 0;
3269 }
3270
3271 static int hclge_tp_port_init(struct hclge_dev *hdev)
3272 {
3273         struct ethtool_link_ksettings cmd;
3274
3275         if (!hnae3_dev_phy_imp_supported(hdev))
3276                 return 0;
3277
3278         cmd.base.autoneg = hdev->hw.mac.autoneg;
3279         cmd.base.speed = hdev->hw.mac.speed;
3280         cmd.base.duplex = hdev->hw.mac.duplex;
3281         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3282
3283         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3284 }
3285
3286 static int hclge_update_port_info(struct hclge_dev *hdev)
3287 {
3288         struct hclge_mac *mac = &hdev->hw.mac;
3289         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3290         int ret;
3291
3292         /* get the port info from SFP cmd if not copper port */
3293         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3294                 return hclge_update_tp_port_info(hdev);
3295
3296         /* if the IMP does not support querying SFP/qSFP info, return directly */
3297         if (!hdev->support_sfp_query)
3298                 return 0;
3299
3300         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3301                 ret = hclge_get_sfp_info(hdev, mac);
3302         else
3303                 ret = hclge_get_sfp_speed(hdev, &speed);
3304
3305         if (ret == -EOPNOTSUPP) {
3306                 hdev->support_sfp_query = false;
3307                 return ret;
3308         } else if (ret) {
3309                 return ret;
3310         }
3311
3312         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3313                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3314                         hclge_update_port_capability(hdev, mac);
3315                         return 0;
3316                 }
3317                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3318                                                HCLGE_MAC_FULL);
3319         } else {
3320                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3321                         return 0; /* do nothing if no SFP */
3322
3323                 /* must config full duplex for SFP */
3324                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3325         }
3326 }
3327
3328 static int hclge_get_status(struct hnae3_handle *handle)
3329 {
3330         struct hclge_vport *vport = hclge_get_vport(handle);
3331         struct hclge_dev *hdev = vport->back;
3332
3333         hclge_update_link_status(hdev);
3334
3335         return hdev->hw.mac.link;
3336 }
3337
3338 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3339 {
3340         if (!pci_num_vf(hdev->pdev)) {
3341                 dev_err(&hdev->pdev->dev,
3342                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3343                 return NULL;
3344         }
3345
3346         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3347                 dev_err(&hdev->pdev->dev,
3348                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3349                         vf, pci_num_vf(hdev->pdev));
3350                 return NULL;
3351         }
3352
3353         /* VFs start from index 1 in the vport array */
3354         vf += HCLGE_VF_VPORT_START_NUM;
3355         return &hdev->vport[vf];
3356 }
3357
3358 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3359                                struct ifla_vf_info *ivf)
3360 {
3361         struct hclge_vport *vport = hclge_get_vport(handle);
3362         struct hclge_dev *hdev = vport->back;
3363
3364         vport = hclge_get_vf_vport(hdev, vf);
3365         if (!vport)
3366                 return -EINVAL;
3367
3368         ivf->vf = vf;
3369         ivf->linkstate = vport->vf_info.link_state;
3370         ivf->spoofchk = vport->vf_info.spoofchk;
3371         ivf->trusted = vport->vf_info.trusted;
3372         ivf->min_tx_rate = 0;
3373         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3374         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3375         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3376         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3377         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3378
3379         return 0;
3380 }
3381
3382 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3383                                    int link_state)
3384 {
3385         struct hclge_vport *vport = hclge_get_vport(handle);
3386         struct hclge_dev *hdev = vport->back;
3387         int link_state_old;
3388         int ret;
3389
3390         vport = hclge_get_vf_vport(hdev, vf);
3391         if (!vport)
3392                 return -EINVAL;
3393
3394         link_state_old = vport->vf_info.link_state;
3395         vport->vf_info.link_state = link_state;
3396
3397         ret = hclge_push_vf_link_status(vport);
3398         if (ret) {
3399                 vport->vf_info.link_state = link_state_old;
3400                 dev_err(&hdev->pdev->dev,
3401                         "failed to push vf%d link status, ret = %d\n", vf, ret);
3402         }
3403
3404         return ret;
3405 }
3406
3407 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3408 {
3409         u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3410
3411         /* fetch the events from their corresponding regs */
3412         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3413         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3414         hw_err_src_reg = hclge_read_dev(&hdev->hw,
3415                                         HCLGE_RAS_PF_OTHER_INT_STS_REG);
3416
3417         /* Assumption: if reset and mailbox events are reported together,
3418          * only the reset event is processed in this pass and the mailbox
3419          * events are deferred. Since the RX CMDQ event has not been cleared
3420          * this time, the hardware will raise another interrupt just for the
3421          * mailbox.
3422          *
3423          * check for vector0 reset event sources
3424          */
3425         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3426                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3427                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3428                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3429                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3430                 hdev->rst_stats.imp_rst_cnt++;
3431                 return HCLGE_VECTOR0_EVENT_RST;
3432         }
3433
3434         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3435                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3436                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3437                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3438                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3439                 hdev->rst_stats.global_rst_cnt++;
3440                 return HCLGE_VECTOR0_EVENT_RST;
3441         }
3442
3443         /* check for vector0 msix event and hardware error event source */
3444         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3445             hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3446                 return HCLGE_VECTOR0_EVENT_ERR;
3447
3448         /* check for vector0 ptp event source */
3449         if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3450                 *clearval = msix_src_reg;
3451                 return HCLGE_VECTOR0_EVENT_PTP;
3452         }
3453
3454         /* check for vector0 mailbox(=CMDQ RX) event source */
3455         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3456                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3457                 *clearval = cmdq_src_reg;
3458                 return HCLGE_VECTOR0_EVENT_MBX;
3459         }
3460
3461         /* print other vector0 event source */
3462         dev_info(&hdev->pdev->dev,
3463                  "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3464                  cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3465
3466         return HCLGE_VECTOR0_EVENT_OTHER;
3467 }
3468
3469 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3470                                     u32 regclr)
3471 {
3472         switch (event_type) {
3473         case HCLGE_VECTOR0_EVENT_PTP:
3474         case HCLGE_VECTOR0_EVENT_RST:
3475                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3476                 break;
3477         case HCLGE_VECTOR0_EVENT_MBX:
3478                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3479                 break;
3480         default:
3481                 break;
3482         }
3483 }
3484
3485 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3486 {
3487         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3488                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3489                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3490                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3491         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3492 }
3493
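/* enable or disable the misc (vector 0) interrupt by writing 1 or 0 to the
 * vector's control address
 */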
3494 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3495 {
3496         writel(enable ? 1 : 0, vector->addr);
3497 }
3498
3499 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3500 {
3501         struct hclge_dev *hdev = data;
3502         unsigned long flags;
3503         u32 clearval = 0;
3504         u32 event_cause;
3505
3506         hclge_enable_vector(&hdev->misc_vector, false);
3507         event_cause = hclge_check_event_cause(hdev, &clearval);
3508
3509         /* vector 0 interrupt is shared with reset and mailbox source events. */
3510         switch (event_cause) {
3511         case HCLGE_VECTOR0_EVENT_ERR:
3512                 hclge_errhand_task_schedule(hdev);
3513                 break;
3514         case HCLGE_VECTOR0_EVENT_RST:
3515                 hclge_reset_task_schedule(hdev);
3516                 break;
3517         case HCLGE_VECTOR0_EVENT_PTP:
3518                 spin_lock_irqsave(&hdev->ptp->lock, flags);
3519                 hclge_ptp_clean_tx_hwts(hdev);
3520                 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3521                 break;
3522         case HCLGE_VECTOR0_EVENT_MBX:
3523                 /* If we are here then,
3524                  * 1. Either we are not handling any mbx task and we are not
3525                  *    scheduled as well
3526                  *                        OR
3527                  * 2. We could be handling a mbx task but nothing more is
3528                  *    scheduled.
3529                  * In both cases, we should schedule mbx task as there are more
3530                  * mbx messages reported by this interrupt.
3531                  */
3532                 hclge_mbx_task_schedule(hdev);
3533                 break;
3534         default:
3535                 dev_warn(&hdev->pdev->dev,
3536                          "received unknown or unhandled event of vector0\n");
3537                 break;
3538         }
3539
3540         hclge_clear_event_cause(hdev, event_cause, clearval);
3541
3542         /* Enable interrupt if it is not caused by reset event or error event */
3543         if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3544             event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3545             event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3546                 hclge_enable_vector(&hdev->misc_vector, true);
3547
3548         return IRQ_HANDLED;
3549 }
3550
3551 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3552 {
3553         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3554                 dev_warn(&hdev->pdev->dev,
3555                          "vector(vector_id %d) has been freed.\n", vector_id);
3556                 return;
3557         }
3558
3559         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3560         hdev->num_msi_left += 1;
3561         hdev->num_msi_used -= 1;
3562 }
3563
3564 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3565 {
3566         struct hclge_misc_vector *vector = &hdev->misc_vector;
3567
3568         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3569
3570         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3571         hdev->vector_status[0] = 0;
3572
3573         hdev->num_msi_left -= 1;
3574         hdev->num_msi_used += 1;
3575 }
3576
3577 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3578                                       const cpumask_t *mask)
3579 {
3580         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3581                                               affinity_notify);
3582
3583         cpumask_copy(&hdev->affinity_mask, mask);
3584 }
3585
3586 static void hclge_irq_affinity_release(struct kref *ref)
3587 {
3588 }
3589
3590 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3591 {
3592         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3593                               &hdev->affinity_mask);
3594
3595         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3596         hdev->affinity_notify.release = hclge_irq_affinity_release;
3597         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3598                                   &hdev->affinity_notify);
3599 }
3600
3601 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3602 {
3603         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3604         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3605 }
3606
3607 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3608 {
3609         int ret;
3610
3611         hclge_get_misc_vector(hdev);
3612
3613         /* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3614         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3615                  HCLGE_NAME, pci_name(hdev->pdev));
3616         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3617                           0, hdev->misc_vector.name, hdev);
3618         if (ret) {
3619                 hclge_free_vector(hdev, 0);
3620                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3621                         hdev->misc_vector.vector_irq);
3622         }
3623
3624         return ret;
3625 }
3626
3627 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3628 {
3629         free_irq(hdev->misc_vector.vector_irq, hdev);
3630         hclge_free_vector(hdev, 0);
3631 }
3632
3633 int hclge_notify_client(struct hclge_dev *hdev,
3634                         enum hnae3_reset_notify_type type)
3635 {
3636         struct hnae3_handle *handle = &hdev->vport[0].nic;
3637         struct hnae3_client *client = hdev->nic_client;
3638         int ret;
3639
3640         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3641                 return 0;
3642
3643         if (!client->ops->reset_notify)
3644                 return -EOPNOTSUPP;
3645
3646         ret = client->ops->reset_notify(handle, type);
3647         if (ret)
3648                 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3649                         type, ret);
3650
3651         return ret;
3652 }
3653
3654 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3655                                     enum hnae3_reset_notify_type type)
3656 {
3657         struct hnae3_handle *handle = &hdev->vport[0].roce;
3658         struct hnae3_client *client = hdev->roce_client;
3659         int ret;
3660
3661         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3662                 return 0;
3663
3664         if (!client->ops->reset_notify)
3665                 return -EOPNOTSUPP;
3666
3667         ret = client->ops->reset_notify(handle, type);
3668         if (ret)
3669                 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3670                         type, ret);
3671
3672         return ret;
3673 }
3674
3675 static int hclge_reset_wait(struct hclge_dev *hdev)
3676 {
3677 #define HCLGE_RESET_WAIT_MS     100
3678 #define HCLGE_RESET_WAIT_CNT    350
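/* poll for at most 350 * 100 ms = 35 seconds for the reset bit to clear */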
3679
3680         u32 val, reg, reg_bit;
3681         u32 cnt = 0;
3682
3683         switch (hdev->reset_type) {
3684         case HNAE3_IMP_RESET:
3685                 reg = HCLGE_GLOBAL_RESET_REG;
3686                 reg_bit = HCLGE_IMP_RESET_BIT;
3687                 break;
3688         case HNAE3_GLOBAL_RESET:
3689                 reg = HCLGE_GLOBAL_RESET_REG;
3690                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3691                 break;
3692         case HNAE3_FUNC_RESET:
3693                 reg = HCLGE_FUN_RST_ING;
3694                 reg_bit = HCLGE_FUN_RST_ING_B;
3695                 break;
3696         default:
3697                 dev_err(&hdev->pdev->dev,
3698                         "Wait for unsupported reset type: %d\n",
3699                         hdev->reset_type);
3700                 return -EINVAL;
3701         }
3702
3703         val = hclge_read_dev(&hdev->hw, reg);
3704         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3705                 msleep(HCLGE_RESET_WAIT_MS);
3706                 val = hclge_read_dev(&hdev->hw, reg);
3707                 cnt++;
3708         }
3709
3710         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3711                 dev_warn(&hdev->pdev->dev,
3712                          "Wait for reset timeout: %d\n", hdev->reset_type);
3713                 return -EBUSY;
3714         }
3715
3716         return 0;
3717 }
3718
3719 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3720 {
3721         struct hclge_vf_rst_cmd *req;
3722         struct hclge_desc desc;
3723
3724         req = (struct hclge_vf_rst_cmd *)desc.data;
3725         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3726         req->dest_vfid = func_id;
3727
3728         if (reset)
3729                 req->vf_rst = 0x1;
3730
3731         return hclge_cmd_send(&hdev->hw, &desc, 1);
3732 }
3733
3734 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3735 {
3736         int i;
3737
3738         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3739                 struct hclge_vport *vport = &hdev->vport[i];
3740                 int ret;
3741
3742                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3743                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3744                 if (ret) {
3745                         dev_err(&hdev->pdev->dev,
3746                                 "set vf(%u) rst failed %d!\n",
3747                                 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3748                                 ret);
3749                         return ret;
3750                 }
3751
3752                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3753                         continue;
3754
3755                 /* Inform VF to process the reset.
3756                  * hclge_inform_reset_assert_to_vf may fail if VF
3757                  * driver is not loaded.
3758                  */
3759                 ret = hclge_inform_reset_assert_to_vf(vport);
3760                 if (ret)
3761                         dev_warn(&hdev->pdev->dev,
3762                                  "inform reset to vf(%u) failed %d!\n",
3763                                  vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3764                                  ret);
3765         }
3766
3767         return 0;
3768 }
3769
3770 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3771 {
3772         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3773             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3774             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3775                 return;
3776
3777         hclge_mbx_handler(hdev);
3778
3779         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3780 }
3781
3782 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3783 {
3784         struct hclge_pf_rst_sync_cmd *req;
3785         struct hclge_desc desc;
3786         int cnt = 0;
3787         int ret;
3788
3789         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3790         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3791
3792         do {
3793                 /* the VF needs to bring its netdev down via mailbox during PF or FLR reset */
3794                 hclge_mailbox_service_task(hdev);
3795
3796                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3797                 /* for compatibility with old firmware, wait
3798                  * 100 ms for the VF to stop IO
3799                  */
3800                 if (ret == -EOPNOTSUPP) {
3801                         msleep(HCLGE_RESET_SYNC_TIME);
3802                         return;
3803                 } else if (ret) {
3804                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3805                                  ret);
3806                         return;
3807                 } else if (req->all_vf_ready) {
3808                         return;
3809                 }
3810                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3811                 hclge_cmd_reuse_desc(&desc, true);
3812         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3813
3814         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3815 }
3816
3817 void hclge_report_hw_error(struct hclge_dev *hdev,
3818                            enum hnae3_hw_error_type type)
3819 {
3820         struct hnae3_client *client = hdev->nic_client;
3821
3822         if (!client || !client->ops->process_hw_error ||
3823             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3824                 return;
3825
3826         client->ops->process_hw_error(&hdev->vport[0].nic, type);
3827 }
3828
3829 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3830 {
3831         u32 reg_val;
3832
3833         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3834         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3835                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3836                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3837                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3838         }
3839
3840         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3841                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3842                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3843                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3844         }
3845 }
3846
3847 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3848 {
3849         struct hclge_desc desc;
3850         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3851         int ret;
3852
3853         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3854         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3855         req->fun_reset_vfid = func_id;
3856
3857         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3858         if (ret)
3859                 dev_err(&hdev->pdev->dev,
3860                         "send function reset cmd fail, status =%d\n", ret);
3861
3862         return ret;
3863 }
3864
3865 static void hclge_do_reset(struct hclge_dev *hdev)
3866 {
3867         struct hnae3_handle *handle = &hdev->vport[0].nic;
3868         struct pci_dev *pdev = hdev->pdev;
3869         u32 val;
3870
3871         if (hclge_get_hw_reset_stat(handle)) {
3872                 dev_info(&pdev->dev, "hardware reset not finished\n");
3873                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3874                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3875                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3876                 return;
3877         }
3878
3879         switch (hdev->reset_type) {
3880         case HNAE3_IMP_RESET:
3881                 dev_info(&pdev->dev, "IMP reset requested\n");
3882                 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3883                 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3884                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3885                 break;
3886         case HNAE3_GLOBAL_RESET:
3887                 dev_info(&pdev->dev, "global reset requested\n");
3888                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3889                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3890                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3891                 break;
3892         case HNAE3_FUNC_RESET:
3893                 dev_info(&pdev->dev, "PF reset requested\n");
3894                 /* schedule again to check later */
3895                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3896                 hclge_reset_task_schedule(hdev);
3897                 break;
3898         default:
3899                 dev_warn(&pdev->dev,
3900                          "unsupported reset type: %d\n", hdev->reset_type);
3901                 break;
3902         }
3903 }
3904
3905 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3906                                                    unsigned long *addr)
3907 {
3908         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3909         struct hclge_dev *hdev = ae_dev->priv;
3910
3911         /* return the highest priority reset level amongst all */
3912         if (test_bit(HNAE3_IMP_RESET, addr)) {
3913                 rst_level = HNAE3_IMP_RESET;
3914                 clear_bit(HNAE3_IMP_RESET, addr);
3915                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3916                 clear_bit(HNAE3_FUNC_RESET, addr);
3917         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3918                 rst_level = HNAE3_GLOBAL_RESET;
3919                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3920                 clear_bit(HNAE3_FUNC_RESET, addr);
3921         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3922                 rst_level = HNAE3_FUNC_RESET;
3923                 clear_bit(HNAE3_FUNC_RESET, addr);
3924         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3925                 rst_level = HNAE3_FLR_RESET;
3926                 clear_bit(HNAE3_FLR_RESET, addr);
3927         }
3928
3929         if (hdev->reset_type != HNAE3_NONE_RESET &&
3930             rst_level < hdev->reset_type)
3931                 return HNAE3_NONE_RESET;
3932
3933         return rst_level;
3934 }
3935
3936 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3937 {
3938         u32 clearval = 0;
3939
3940         switch (hdev->reset_type) {
3941         case HNAE3_IMP_RESET:
3942                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3943                 break;
3944         case HNAE3_GLOBAL_RESET:
3945                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3946                 break;
3947         default:
3948                 break;
3949         }
3950
3951         if (!clearval)
3952                 return;
3953
3954         /* For revision 0x20, the reset interrupt source
3955          * can only be cleared after the hardware reset is done
3956          */
3957         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3958                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3959                                 clearval);
3960
3961         hclge_enable_vector(&hdev->misc_vector, true);
3962 }
3963
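/* reset handshake: set or clear the software-ready bit (HCLGE_NIC_SW_RST_RDY)
 * in the NIC CSQ depth register so that the hardware knows whether the driver
 * side is ready for the reset to proceed
 */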
3964 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3965 {
3966         u32 reg_val;
3967
3968         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3969         if (enable)
3970                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3971         else
3972                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3973
3974         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3975 }
3976
3977 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3978 {
3979         int ret;
3980
3981         ret = hclge_set_all_vf_rst(hdev, true);
3982         if (ret)
3983                 return ret;
3984
3985         hclge_func_reset_sync_vf(hdev);
3986
3987         return 0;
3988 }
3989
3990 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3991 {
3992         u32 reg_val;
3993         int ret = 0;
3994
3995         switch (hdev->reset_type) {
3996         case HNAE3_FUNC_RESET:
3997                 ret = hclge_func_reset_notify_vf(hdev);
3998                 if (ret)
3999                         return ret;
4000
4001                 ret = hclge_func_reset_cmd(hdev, 0);
4002                 if (ret) {
4003                         dev_err(&hdev->pdev->dev,
4004                                 "asserting function reset fail %d!\n", ret);
4005                         return ret;
4006                 }
4007
4008                 /* After performing PF reset, it is not necessary to do the
4009                  * mailbox handling or send any command to the firmware,
4010                  * because any mailbox handling or command to the firmware is
4011                  * only valid after hclge_cmd_init is called.
4012                  */
4013                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
4014                 hdev->rst_stats.pf_rst_cnt++;
4015                 break;
4016         case HNAE3_FLR_RESET:
4017                 ret = hclge_func_reset_notify_vf(hdev);
4018                 if (ret)
4019                         return ret;
4020                 break;
4021         case HNAE3_IMP_RESET:
4022                 hclge_handle_imp_error(hdev);
4023                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4024                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4025                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4026                 break;
4027         default:
4028                 break;
4029         }
4030
4031         /* inform hardware that preparatory work is done */
4032         msleep(HCLGE_RESET_SYNC_TIME);
4033         hclge_reset_handshake(hdev, true);
4034         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4035
4036         return ret;
4037 }
4038
4039 static void hclge_show_rst_info(struct hclge_dev *hdev)
4040 {
4041         char *buf;
4042
4043         buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4044         if (!buf)
4045                 return;
4046
4047         hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4048
4049         dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4050
4051         kfree(buf);
4052 }
4053
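/* Decide whether a failed reset should be retried: retry while another reset
 * is still pending or fewer than MAX_RESET_FAIL_CNT attempts have failed;
 * if a new reset interrupt is already asserted, or the retry budget is
 * exhausted, give up and let the caller stop re-scheduling.
 */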
4054 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4055 {
4056 #define MAX_RESET_FAIL_CNT 5
4057
4058         if (hdev->reset_pending) {
4059                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4060                          hdev->reset_pending);
4061                 return true;
4062         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4063                    HCLGE_RESET_INT_M) {
4064                 dev_info(&hdev->pdev->dev,
4065                          "reset failed because new reset interrupt\n");
4066                 hclge_clear_reset_cause(hdev);
4067                 return false;
4068         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4069                 hdev->rst_stats.reset_fail_cnt++;
4070                 set_bit(hdev->reset_type, &hdev->reset_pending);
4071                 dev_info(&hdev->pdev->dev,
4072                          "re-schedule reset task(%u)\n",
4073                          hdev->rst_stats.reset_fail_cnt);
4074                 return true;
4075         }
4076
4077         hclge_clear_reset_cause(hdev);
4078
4079         /* recover the handshake status when reset fails */
4080         hclge_reset_handshake(hdev, true);
4081
4082         dev_err(&hdev->pdev->dev, "Reset fail!\n");
4083
4084         hclge_show_rst_info(hdev);
4085
4086         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4087
4088         return false;
4089 }
4090
4091 static void hclge_update_reset_level(struct hclge_dev *hdev)
4092 {
4093         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4094         enum hnae3_reset_type reset_level;
4095
4096         /* reset request will not be set during reset, so clear
4097          * pending reset request to avoid unnecessary reset
4098          * caused by the same reason.
4099          */
4100         hclge_get_reset_level(ae_dev, &hdev->reset_request);
4101
4102         /* if default_reset_request has a higher level reset request,
4103          * it should be handled as soon as possible, since some errors
4104          * need this kind of reset to be fixed.
4105          */
4106         reset_level = hclge_get_reset_level(ae_dev,
4107                                             &hdev->default_reset_request);
4108         if (reset_level != HNAE3_NONE_RESET)
4109                 set_bit(reset_level, &hdev->reset_request);
4110 }
4111
4112 static int hclge_set_rst_done(struct hclge_dev *hdev)
4113 {
4114         struct hclge_pf_rst_done_cmd *req;
4115         struct hclge_desc desc;
4116         int ret;
4117
4118         req = (struct hclge_pf_rst_done_cmd *)desc.data;
4119         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4120         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4121
4122         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4123         /* To be compatible with the old firmware, which does not support
4124          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4125          * return success
4126          */
4127         if (ret == -EOPNOTSUPP) {
4128                 dev_warn(&hdev->pdev->dev,
4129                          "current firmware does not support command(0x%x)!\n",
4130                          HCLGE_OPC_PF_RST_DONE);
4131                 return 0;
4132         } else if (ret) {
4133                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4134                         ret);
4135         }
4136
4137         return ret;
4138 }
4139
4140 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4141 {
4142         int ret = 0;
4143
4144         switch (hdev->reset_type) {
4145         case HNAE3_FUNC_RESET:
4146         case HNAE3_FLR_RESET:
4147                 ret = hclge_set_all_vf_rst(hdev, false);
4148                 break;
4149         case HNAE3_GLOBAL_RESET:
4150         case HNAE3_IMP_RESET:
4151                 ret = hclge_set_rst_done(hdev);
4152                 break;
4153         default:
4154                 break;
4155         }
4156
4157         /* clear the handshake status after re-initialization is done */
4158         hclge_reset_handshake(hdev, false);
4159
4160         return ret;
4161 }
4162
4163 static int hclge_reset_stack(struct hclge_dev *hdev)
4164 {
4165         int ret;
4166
4167         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4168         if (ret)
4169                 return ret;
4170
4171         ret = hclge_reset_ae_dev(hdev->ae_dev);
4172         if (ret)
4173                 return ret;
4174
4175         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4176 }
4177
4178 static int hclge_reset_prepare(struct hclge_dev *hdev)
4179 {
4180         int ret;
4181
4182         hdev->rst_stats.reset_cnt++;
4183         /* perform reset of the stack & ae device for a client */
4184         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4185         if (ret)
4186                 return ret;
4187
4188         rtnl_lock();
4189         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4190         rtnl_unlock();
4191         if (ret)
4192                 return ret;
4193
4194         return hclge_reset_prepare_wait(hdev);
4195 }
4196
4197 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4198 {
4199         int ret;
4200
4201         hdev->rst_stats.hw_reset_done_cnt++;
4202
4203         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4204         if (ret)
4205                 return ret;
4206
4207         rtnl_lock();
4208         ret = hclge_reset_stack(hdev);
4209         rtnl_unlock();
4210         if (ret)
4211                 return ret;
4212
4213         hclge_clear_reset_cause(hdev);
4214
4215         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4216         /* ignore the RoCE notify error if the reset has already failed
4217          * HCLGE_RESET_MAX_FAIL_CNT - 1 times, so the rebuild can continue
4218          */
4219         if (ret &&
4220             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4221                 return ret;
4222
4223         ret = hclge_reset_prepare_up(hdev);
4224         if (ret)
4225                 return ret;
4226
4227         rtnl_lock();
4228         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4229         rtnl_unlock();
4230         if (ret)
4231                 return ret;
4232
4233         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4234         if (ret)
4235                 return ret;
4236
4237         hdev->last_reset_time = jiffies;
4238         hdev->rst_stats.reset_fail_cnt = 0;
4239         hdev->rst_stats.reset_done_cnt++;
4240         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4241
4242         hclge_update_reset_level(hdev);
4243
4244         return 0;
4245 }
4246
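/* Top-level reset flow: prepare (notify clients and assert the reset), wait
 * for the hardware to finish, then rebuild; any failure falls through to
 * hclge_reset_err_handle(), which may re-schedule the reset task.
 */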
4247 static void hclge_reset(struct hclge_dev *hdev)
4248 {
4249         if (hclge_reset_prepare(hdev))
4250                 goto err_reset;
4251
4252         if (hclge_reset_wait(hdev))
4253                 goto err_reset;
4254
4255         if (hclge_reset_rebuild(hdev))
4256                 goto err_reset;
4257
4258         return;
4259
4260 err_reset:
4261         if (hclge_reset_err_handle(hdev))
4262                 hclge_reset_task_schedule(hdev);
4263 }
4264
4265 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4266 {
4267         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4268         struct hclge_dev *hdev = ae_dev->priv;
4269
4270         /* We might end up getting called broadly because of the 2 cases below:
4271          * 1. A recoverable error was conveyed through APEI and the only way to
4272          *    bring back normalcy is to reset.
4273          * 2. A new reset request from the stack due to timeout
4274          *
4275          * Check if this is a new reset request and we are not here just because
4276          * the last reset attempt did not succeed and the watchdog hit us again.
4277          * We will know this if the last reset request did not occur very recently
4278          * (watchdog timer = 5*HZ, let us check after a sufficiently large time,
4279          * say 4*5*HZ). In case of a new request we reset the "reset level" to
4280          * PF reset. And if it is a repeat of the most recent reset request, we
4281          * want to make sure we throttle it. Therefore, we will not allow it
4282          * again before HCLGE_RESET_INTERVAL has elapsed.
4283          */
4284
4285         if (time_before(jiffies, (hdev->last_reset_time +
4286                                   HCLGE_RESET_INTERVAL))) {
4287                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4288                 return;
4289         }
4290
4291         if (hdev->default_reset_request) {
4292                 hdev->reset_level =
4293                         hclge_get_reset_level(ae_dev,
4294                                               &hdev->default_reset_request);
4295         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4296                 hdev->reset_level = HNAE3_FUNC_RESET;
4297         }
4298
4299         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4300                  hdev->reset_level);
4301
4302         /* request reset & schedule reset task */
4303         set_bit(hdev->reset_level, &hdev->reset_request);
4304         hclge_reset_task_schedule(hdev);
4305
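        /* bump the level so that a repeated request escalates towards a
         * global reset
         */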
4306         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4307                 hdev->reset_level++;
4308 }
4309
4310 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4311                                         enum hnae3_reset_type rst_type)
4312 {
4313         struct hclge_dev *hdev = ae_dev->priv;
4314
4315         set_bit(rst_type, &hdev->default_reset_request);
4316 }
4317
4318 static void hclge_reset_timer(struct timer_list *t)
4319 {
4320         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4321
4322         /* if default_reset_request has no value, it means that this reset
4323          * request has already been handled, so just return here
4324          */
4325         if (!hdev->default_reset_request)
4326                 return;
4327
4328         dev_info(&hdev->pdev->dev,
4329                  "triggering reset in reset timer\n");
4330         hclge_reset_event(hdev->pdev, NULL);
4331 }
4332
4333 static void hclge_reset_subtask(struct hclge_dev *hdev)
4334 {
4335         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4336
4337         /* check if there is any ongoing reset in the hardware. This status can
4338          * be checked from reset_pending. If there is, we need to wait for the
4339          * hardware to complete the reset.
4340          *    a. If we are able to figure out in reasonable time that the
4341          *       hardware has fully reset, we can proceed with the driver and
4342          *       client reset.
4343          *    b. else, we can come back later to check this status, so
4344          *       reschedule now.
4345          */
4346         hdev->last_reset_time = jiffies;
4347         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4348         if (hdev->reset_type != HNAE3_NONE_RESET)
4349                 hclge_reset(hdev);
4350
4351         /* check if we got any *new* reset requests to be honored */
4352         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4353         if (hdev->reset_type != HNAE3_NONE_RESET)
4354                 hclge_do_reset(hdev);
4355
4356         hdev->reset_type = HNAE3_NONE_RESET;
4357 }
4358
4359 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4360 {
4361         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4362         enum hnae3_reset_type reset_type;
4363
4364         if (ae_dev->hw_err_reset_req) {
4365                 reset_type = hclge_get_reset_level(ae_dev,
4366                                                    &ae_dev->hw_err_reset_req);
4367                 hclge_set_def_reset_request(ae_dev, reset_type);
4368         }
4369
4370         if (hdev->default_reset_request && ae_dev->ops->reset_event)
4371                 ae_dev->ops->reset_event(hdev->pdev, NULL);
4372
4373         /* enable interrupt after error handling complete */
4374         hclge_enable_vector(&hdev->misc_vector, true);
4375 }
4376
4377 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4378 {
4379         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4380
4381         ae_dev->hw_err_reset_req = 0;
4382
4383         if (hclge_find_error_source(hdev)) {
4384                 hclge_handle_error_info_log(ae_dev);
4385                 hclge_handle_mac_tnl(hdev);
4386         }
4387
4388         hclge_handle_err_reset_request(hdev);
4389 }
4390
4391 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4392 {
4393         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4394         struct device *dev = &hdev->pdev->dev;
4395         u32 msix_sts_reg;
4396
4397         msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4398         if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4399                 if (hclge_handle_hw_msix_error
4400                                 (hdev, &hdev->default_reset_request))
4401                         dev_info(dev, "received msix interrupt 0x%x\n",
4402                                  msix_sts_reg);
4403         }
4404
4405         hclge_handle_hw_ras_error(ae_dev);
4406
4407         hclge_handle_err_reset_request(hdev);
4408 }
4409
4410 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4411 {
4412         if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4413                 return;
4414
4415         if (hnae3_dev_ras_imp_supported(hdev))
4416                 hclge_handle_err_recovery(hdev);
4417         else
4418                 hclge_misc_err_recovery(hdev);
4419 }
4420
4421 static void hclge_reset_service_task(struct hclge_dev *hdev)
4422 {
4423         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4424                 return;
4425
4426         down(&hdev->reset_sem);
4427         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4428
4429         hclge_reset_subtask(hdev);
4430
4431         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4432         up(&hdev->reset_sem);
4433 }
4434
4435 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4436 {
4437         int i;
4438
4439         /* start from vport 1 since the PF (vport 0) is always alive */
4440         for (i = 1; i < hdev->num_alloc_vport; i++) {
4441                 struct hclge_vport *vport = &hdev->vport[i];
4442
4443                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4444                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4445
4446                 /* If vf is not alive, set to default value */
4447                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4448                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4449         }
4450 }
4451
4452 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4453 {
4454         unsigned long delta = round_jiffies_relative(HZ);
4455
4456         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4457                 return;
4458
4459         /* Always handle the link update to make sure the link state is
4460          * updated when it is triggered by mbx.
4461          */
4462         hclge_update_link_status(hdev);
4463         hclge_sync_mac_table(hdev);
4464         hclge_sync_promisc_mode(hdev);
4465         hclge_sync_fd_table(hdev);
4466
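        /* throttle the heavier periodic work to roughly once per second: if
         * the previous pass ran less than HZ jiffies ago, just re-schedule
         * for the remaining time
         */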
4467         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4468                 delta = jiffies - hdev->last_serv_processed;
4469
4470                 if (delta < round_jiffies_relative(HZ)) {
4471                         delta = round_jiffies_relative(HZ) - delta;
4472                         goto out;
4473                 }
4474         }
4475
4476         hdev->serv_processed_cnt++;
4477         hclge_update_vport_alive(hdev);
4478
4479         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4480                 hdev->last_serv_processed = jiffies;
4481                 goto out;
4482         }
4483
4484         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4485                 hclge_update_stats_for_all(hdev);
4486
4487         hclge_update_port_info(hdev);
4488         hclge_sync_vlan_filter(hdev);
4489
4490         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4491                 hclge_rfs_filter_expire(hdev);
4492
4493         hdev->last_serv_processed = jiffies;
4494
4495 out:
4496         hclge_task_schedule(hdev, delta);
4497 }
4498
4499 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4500 {
4501         unsigned long flags;
4502
4503         if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4504             !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4505             !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4506                 return;
4507
4508         /* to avoid racing with the irq handler */
4509         spin_lock_irqsave(&hdev->ptp->lock, flags);
4510
4511         /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4512          * handler may handle it just before spin_lock_irqsave().
4513          */
4514         if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4515                 hclge_ptp_clean_tx_hwts(hdev);
4516
4517         spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4518 }
4519
4520 static void hclge_service_task(struct work_struct *work)
4521 {
4522         struct hclge_dev *hdev =
4523                 container_of(work, struct hclge_dev, service_task.work);
4524
4525         hclge_errhand_service_task(hdev);
4526         hclge_reset_service_task(hdev);
4527         hclge_ptp_service_task(hdev);
4528         hclge_mailbox_service_task(hdev);
4529         hclge_periodic_service_task(hdev);
4530
4531         /* Handle error recovery, reset and mbx again in case the periodic task
4532          * delays the handling by calling hclge_task_schedule() in
4533          * hclge_periodic_service_task().
4534          */
4535         hclge_errhand_service_task(hdev);
4536         hclge_reset_service_task(hdev);
4537         hclge_mailbox_service_task(hdev);
4538 }
4539
4540 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4541 {
4542         /* VF handle has no client */
4543         if (!handle->client)
4544                 return container_of(handle, struct hclge_vport, nic);
4545         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4546                 return container_of(handle, struct hclge_vport, roce);
4547         else
4548                 return container_of(handle, struct hclge_vport, nic);
4549 }
4550
4551 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4552                                   struct hnae3_vector_info *vector_info)
4553 {
4554 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4555
4556         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4557
4558         /* need an extended offset to configure vector index >= 64 */
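        /* e.g. with HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 = 64, idx = 65 uses the
         * extended region: io_base + HCLGE_VECTOR_EXT_REG_BASE +
         * 1 * HCLGE_VECTOR_REG_OFFSET_H + 0 * HCLGE_VECTOR_REG_OFFSET,
         * since (idx - 1) = 64
         */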
4559         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4560                 vector_info->io_addr = hdev->hw.io_base +
4561                                 HCLGE_VECTOR_REG_BASE +
4562                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4563         else
4564                 vector_info->io_addr = hdev->hw.io_base +
4565                                 HCLGE_VECTOR_EXT_REG_BASE +
4566                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4567                                 HCLGE_VECTOR_REG_OFFSET_H +
4568                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4569                                 HCLGE_VECTOR_REG_OFFSET;
4570
4571         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4572         hdev->vector_irq[idx] = vector_info->vector;
4573 }
4574
4575 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4576                             struct hnae3_vector_info *vector_info)
4577 {
4578         struct hclge_vport *vport = hclge_get_vport(handle);
4579         struct hnae3_vector_info *vector = vector_info;
4580         struct hclge_dev *hdev = vport->back;
4581         int alloc = 0;
4582         u16 i = 0;
4583         u16 j;
4584
4585         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4586         vector_num = min(hdev->num_msi_left, vector_num);
4587
4588         for (j = 0; j < vector_num; j++) {
4589                 while (++i < hdev->num_nic_msi) {
4590                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4591                                 hclge_get_vector_info(hdev, i, vector);
4592                                 vector++;
4593                                 alloc++;
4594
4595                                 break;
4596                         }
4597                 }
4598         }
4599         hdev->num_msi_left -= alloc;
4600         hdev->num_msi_used += alloc;
4601
4602         return alloc;
4603 }
4604
4605 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4606 {
4607         int i;
4608
4609         for (i = 0; i < hdev->num_msi; i++)
4610                 if (vector == hdev->vector_irq[i])
4611                         return i;
4612
4613         return -EINVAL;
4614 }
4615
4616 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4617 {
4618         struct hclge_vport *vport = hclge_get_vport(handle);
4619         struct hclge_dev *hdev = vport->back;
4620         int vector_id;
4621
4622         vector_id = hclge_get_vector_index(hdev, vector);
4623         if (vector_id < 0) {
4624                 dev_err(&hdev->pdev->dev,
4625                         "Get vector index fail. vector = %d\n", vector);
4626                 return vector_id;
4627         }
4628
4629         hclge_free_vector(hdev, vector_id);
4630
4631         return 0;
4632 }
4633
4634 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4635 {
4636         return HCLGE_RSS_KEY_SIZE;
4637 }
4638
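/* Program the RSS hash key in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one
 * command descriptor per chunk; hash_config carries the algorithm in its low
 * bits and the chunk offset above HCLGE_RSS_HASH_KEY_OFFSET_B.
 */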
4639 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4640                                   const u8 hfunc, const u8 *key)
4641 {
4642         struct hclge_rss_config_cmd *req;
4643         unsigned int key_offset = 0;
4644         struct hclge_desc desc;
4645         int key_counts;
4646         int key_size;
4647         int ret;
4648
4649         key_counts = HCLGE_RSS_KEY_SIZE;
4650         req = (struct hclge_rss_config_cmd *)desc.data;
4651
4652         while (key_counts) {
4653                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4654                                            false);
4655
4656                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4657                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4658
4659                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4660                 memcpy(req->hash_key,
4661                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4662
4663                 key_counts -= key_size;
4664                 key_offset++;
4665                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4666                 if (ret) {
4667                         dev_err(&hdev->pdev->dev,
4668                                 "Configure RSS config fail, status = %d\n",
4669                                 ret);
4670                         return ret;
4671                 }
4672         }
4673         return 0;
4674 }
4675
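/* Write the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries per
 * descriptor: the low byte of each queue id goes into rss_qid_l[], while its
 * extra most-significant bit is packed into the rss_qid_h[] bitmap.
 */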
4676 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4677 {
4678         struct hclge_rss_indirection_table_cmd *req;
4679         struct hclge_desc desc;
4680         int rss_cfg_tbl_num;
4681         u8 rss_msb_oft;
4682         u8 rss_msb_val;
4683         int ret;
4684         u16 qid;
4685         int i;
4686         u32 j;
4687
4688         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4689         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4690                           HCLGE_RSS_CFG_TBL_SIZE;
4691
4692         for (i = 0; i < rss_cfg_tbl_num; i++) {
4693                 hclge_cmd_setup_basic_desc
4694                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4695
4696                 req->start_table_index =
4697                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4698                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4699                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4700                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4701                         req->rss_qid_l[j] = qid & 0xff;
4702                         rss_msb_oft =
4703                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4704                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4705                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4706                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4707                 }
4708                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4709                 if (ret) {
4710                         dev_err(&hdev->pdev->dev,
4711                                 "Configure rss indir table fail,status = %d\n",
4712                                 ret);
4713                         return ret;
4714                 }
4715         }
4716         return 0;
4717 }
4718
4719 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4720                                  u16 *tc_size, u16 *tc_offset)
4721 {
4722         struct hclge_rss_tc_mode_cmd *req;
4723         struct hclge_desc desc;
4724         int ret;
4725         int i;
4726
4727         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4728         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4729
4730         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4731                 u16 mode = 0;
4732
4733                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4734                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4735                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4736                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4737                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4738                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4739                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4740
4741                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4742         }
4743
4744         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4745         if (ret)
4746                 dev_err(&hdev->pdev->dev,
4747                         "Configure rss tc mode fail, status = %d\n", ret);
4748
4749         return ret;
4750 }
4751
4752 static void hclge_get_rss_type(struct hclge_vport *vport)
4753 {
4754         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4755             vport->rss_tuple_sets.ipv4_udp_en ||
4756             vport->rss_tuple_sets.ipv4_sctp_en ||
4757             vport->rss_tuple_sets.ipv6_tcp_en ||
4758             vport->rss_tuple_sets.ipv6_udp_en ||
4759             vport->rss_tuple_sets.ipv6_sctp_en)
4760                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4761         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4762                  vport->rss_tuple_sets.ipv6_fragment_en)
4763                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4764         else
4765                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4766 }
4767
4768 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4769 {
4770         struct hclge_rss_input_tuple_cmd *req;
4771         struct hclge_desc desc;
4772         int ret;
4773
4774         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4775
4776         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4777
4778         /* Get the tuple cfg from pf */
4779         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4780         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4781         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4782         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4783         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4784         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4785         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4786         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4787         hclge_get_rss_type(&hdev->vport[0]);
4788         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4789         if (ret)
4790                 dev_err(&hdev->pdev->dev,
4791                         "Configure rss input fail, status = %d\n", ret);
4792         return ret;
4793 }
4794
4795 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4796                          u8 *key, u8 *hfunc)
4797 {
4798         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4799         struct hclge_vport *vport = hclge_get_vport(handle);
4800         int i;
4801
4802         /* Get hash algorithm */
4803         if (hfunc) {
4804                 switch (vport->rss_algo) {
4805                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4806                         *hfunc = ETH_RSS_HASH_TOP;
4807                         break;
4808                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4809                         *hfunc = ETH_RSS_HASH_XOR;
4810                         break;
4811                 default:
4812                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4813                         break;
4814                 }
4815         }
4816
4817         /* Get the RSS Key required by the user */
4818         if (key)
4819                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4820
4821         /* Get indirect table */
4822         if (indir)
4823                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4824                         indir[i] =  vport->rss_indirection_tbl[i];
4825
4826         return 0;
4827 }
4828
4829 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4830                                  u8 *hash_algo)
4831 {
4832         switch (hfunc) {
4833         case ETH_RSS_HASH_TOP:
4834                 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4835                 return 0;
4836         case ETH_RSS_HASH_XOR:
4837                 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4838                 return 0;
4839         case ETH_RSS_HASH_NO_CHANGE:
4840                 *hash_algo = vport->rss_algo;
4841                 return 0;
4842         default:
4843                 return -EINVAL;
4844         }
4845 }
4846
4847 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4848                          const  u8 *key, const  u8 hfunc)
4849 {
4850         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4851         struct hclge_vport *vport = hclge_get_vport(handle);
4852         struct hclge_dev *hdev = vport->back;
4853         u8 hash_algo;
4854         int ret, i;
4855
4856         ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4857         if (ret) {
4858                 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4859                 return ret;
4860         }
4861
4862         /* Set the RSS Hash Key if specified by the user */
4863         if (key) {
4864                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4865                 if (ret)
4866                         return ret;
4867
4868                 /* Update the shadow RSS key with the user specified key */
4869                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4870         } else {
4871                 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4872                                              vport->rss_hash_key);
4873                 if (ret)
4874                         return ret;
4875         }
4876         vport->rss_algo = hash_algo;
4877
4878         /* Update the shadow RSS table with user specified qids */
4879         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4880                 vport->rss_indirection_tbl[i] = indir[i];
4881
4882         /* Update the hardware */
4883         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4884 }
4885
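/* Translate the ethtool RXH_* hash fields of one flow type into hardware
 * tuple bits: source/destination L4 port, source/destination IP address,
 * plus the verification tag for SCTP flows.
 */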
4886 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4887 {
4888         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4889
4890         if (nfc->data & RXH_L4_B_2_3)
4891                 hash_sets |= HCLGE_D_PORT_BIT;
4892         else
4893                 hash_sets &= ~HCLGE_D_PORT_BIT;
4894
4895         if (nfc->data & RXH_IP_SRC)
4896                 hash_sets |= HCLGE_S_IP_BIT;
4897         else
4898                 hash_sets &= ~HCLGE_S_IP_BIT;
4899
4900         if (nfc->data & RXH_IP_DST)
4901                 hash_sets |= HCLGE_D_IP_BIT;
4902         else
4903                 hash_sets &= ~HCLGE_D_IP_BIT;
4904
4905         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4906                 hash_sets |= HCLGE_V_TAG_BIT;
4907
4908         return hash_sets;
4909 }
4910
4911 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4912                                     struct ethtool_rxnfc *nfc,
4913                                     struct hclge_rss_input_tuple_cmd *req)
4914 {
4915         struct hclge_dev *hdev = vport->back;
4916         u8 tuple_sets;
4917
4918         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4919         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4920         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4921         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4922         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4923         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4924         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4925         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4926
4927         tuple_sets = hclge_get_rss_hash_bits(nfc);
4928         switch (nfc->flow_type) {
4929         case TCP_V4_FLOW:
4930                 req->ipv4_tcp_en = tuple_sets;
4931                 break;
4932         case TCP_V6_FLOW:
4933                 req->ipv6_tcp_en = tuple_sets;
4934                 break;
4935         case UDP_V4_FLOW:
4936                 req->ipv4_udp_en = tuple_sets;
4937                 break;
4938         case UDP_V6_FLOW:
4939                 req->ipv6_udp_en = tuple_sets;
4940                 break;
4941         case SCTP_V4_FLOW:
4942                 req->ipv4_sctp_en = tuple_sets;
4943                 break;
4944         case SCTP_V6_FLOW:
4945                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4946                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4947                         return -EINVAL;
4948
4949                 req->ipv6_sctp_en = tuple_sets;
4950                 break;
4951         case IPV4_FLOW:
4952                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4953                 break;
4954         case IPV6_FLOW:
4955                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4956                 break;
4957         default:
4958                 return -EINVAL;
4959         }
4960
4961         return 0;
4962 }
4963
4964 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4965                                struct ethtool_rxnfc *nfc)
4966 {
4967         struct hclge_vport *vport = hclge_get_vport(handle);
4968         struct hclge_dev *hdev = vport->back;
4969         struct hclge_rss_input_tuple_cmd *req;
4970         struct hclge_desc desc;
4971         int ret;
4972
4973         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4974                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4975                 return -EINVAL;
4976
4977         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4978         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4979
4980         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4981         if (ret) {
4982                 dev_err(&hdev->pdev->dev,
4983                         "failed to init rss tuple cmd, ret = %d\n", ret);
4984                 return ret;
4985         }
4986
4987         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4988         if (ret) {
4989                 dev_err(&hdev->pdev->dev,
4990                         "Set rss tuple fail, status = %d\n", ret);
4991                 return ret;
4992         }
4993
4994         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4995         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4996         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4997         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4998         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4999         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
5000         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
5001         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
5002         hclge_get_rss_type(vport);
5003         return 0;
5004 }
5005
5006 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
5007                                      u8 *tuple_sets)
5008 {
5009         switch (flow_type) {
5010         case TCP_V4_FLOW:
5011                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
5012                 break;
5013         case UDP_V4_FLOW:
5014                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
5015                 break;
5016         case TCP_V6_FLOW:
5017                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
5018                 break;
5019         case UDP_V6_FLOW:
5020                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
5021                 break;
5022         case SCTP_V4_FLOW:
5023                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
5024                 break;
5025         case SCTP_V6_FLOW:
5026                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
5027                 break;
5028         case IPV4_FLOW:
5029         case IPV6_FLOW:
5030                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
5031                 break;
5032         default:
5033                 return -EINVAL;
5034         }
5035
5036         return 0;
5037 }
5038
5039 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
5040 {
5041         u64 tuple_data = 0;
5042
5043         if (tuple_sets & HCLGE_D_PORT_BIT)
5044                 tuple_data |= RXH_L4_B_2_3;
5045         if (tuple_sets & HCLGE_S_PORT_BIT)
5046                 tuple_data |= RXH_L4_B_0_1;
5047         if (tuple_sets & HCLGE_D_IP_BIT)
5048                 tuple_data |= RXH_IP_DST;
5049         if (tuple_sets & HCLGE_S_IP_BIT)
5050                 tuple_data |= RXH_IP_SRC;
5051
5052         return tuple_data;
5053 }
5054
5055 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
5056                                struct ethtool_rxnfc *nfc)
5057 {
5058         struct hclge_vport *vport = hclge_get_vport(handle);
5059         u8 tuple_sets;
5060         int ret;
5061
5062         nfc->data = 0;
5063
5064         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
5065         if (ret || !tuple_sets)
5066                 return ret;
5067
5068         nfc->data = hclge_convert_rss_tuple(tuple_sets);
5069
5070         return 0;
5071 }
5072
5073 static int hclge_get_tc_size(struct hnae3_handle *handle)
5074 {
5075         struct hclge_vport *vport = hclge_get_vport(handle);
5076         struct hclge_dev *hdev = vport->back;
5077
5078         return hdev->pf_rss_size_max;
5079 }
5080
5081 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5082 {
5083         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5084         struct hclge_vport *vport = hdev->vport;
5085         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5086         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5087         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5088         struct hnae3_tc_info *tc_info;
5089         u16 roundup_size;
5090         u16 rss_size;
5091         int i;
5092
5093         tc_info = &vport->nic.kinfo.tc_info;
5094         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5095                 rss_size = tc_info->tqp_count[i];
5096                 tc_valid[i] = 0;
5097
5098                 if (!(hdev->hw_tc_map & BIT(i)))
5099                         continue;
5100
5101                 /* tc_size set to hardware is the log2 of the roundup power of
5102                  * two of rss_size; the actual queue size is limited by the
5103                  * indirection table.
5104                  */
5105                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5106                     rss_size == 0) {
5107                         dev_err(&hdev->pdev->dev,
5108                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5109                                 rss_size);
5110                         return -EINVAL;
5111                 }
5112
5113                 roundup_size = roundup_pow_of_two(rss_size);
5114                 roundup_size = ilog2(roundup_size);
5115
5116                 tc_valid[i] = 1;
5117                 tc_size[i] = roundup_size;
5118                 tc_offset[i] = tc_info->tqp_offset[i];
5119         }
5120
5121         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5122 }
5123
5124 int hclge_rss_init_hw(struct hclge_dev *hdev)
5125 {
5126         struct hclge_vport *vport = hdev->vport;
5127         u16 *rss_indir = vport[0].rss_indirection_tbl;
5128         u8 *key = vport[0].rss_hash_key;
5129         u8 hfunc = vport[0].rss_algo;
5130         int ret;
5131
5132         ret = hclge_set_rss_indir_table(hdev, rss_indir);
5133         if (ret)
5134                 return ret;
5135
5136         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5137         if (ret)
5138                 return ret;
5139
5140         ret = hclge_set_rss_input_tuple(hdev);
5141         if (ret)
5142                 return ret;
5143
5144         return hclge_init_rss_tc_mode(hdev);
5145 }
5146
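/* Default indirection table: spread the entries round-robin over the queues
 * allocated for RSS, i.e. entry i points at queue (i % alloc_rss_size).
 */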
5147 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5148 {
5149         struct hclge_vport *vport = &hdev->vport[0];
5150         int i;
5151
5152         for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5153                 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5154 }
5155
5156 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5157 {
5158         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5159         int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5160         struct hclge_vport *vport = &hdev->vport[0];
5161         u16 *rss_ind_tbl;
5162
5163         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5164                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5165
5166         vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5167         vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5168         vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5169         vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5170         vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5171         vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5172         vport->rss_tuple_sets.ipv6_sctp_en =
5173                 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5174                 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5175                 HCLGE_RSS_INPUT_TUPLE_SCTP;
5176         vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5177
5178         vport->rss_algo = rss_algo;
5179
5180         rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5181                                    sizeof(*rss_ind_tbl), GFP_KERNEL);
5182         if (!rss_ind_tbl)
5183                 return -ENOMEM;
5184
5185         vport->rss_indirection_tbl = rss_ind_tbl;
5186         memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5187
5188         hclge_rss_indir_init_cfg(hdev);
5189
5190         return 0;
5191 }
5192
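/* Map (en = true) or unmap (en = false) a chain of rings to an interrupt
 * vector. The chain is written HCLGE_VECTOR_ELEMENTS_PER_CMD entries at a
 * time, one command descriptor per batch plus a final one for any remainder.
 */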
5193 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5194                                 int vector_id, bool en,
5195                                 struct hnae3_ring_chain_node *ring_chain)
5196 {
5197         struct hclge_dev *hdev = vport->back;
5198         struct hnae3_ring_chain_node *node;
5199         struct hclge_desc desc;
5200         struct hclge_ctrl_vector_chain_cmd *req =
5201                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5202         enum hclge_cmd_status status;
5203         enum hclge_opcode_type op;
5204         u16 tqp_type_and_id;
5205         int i;
5206
5207         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5208         hclge_cmd_setup_basic_desc(&desc, op, false);
5209         req->int_vector_id_l = hnae3_get_field(vector_id,
5210                                                HCLGE_VECTOR_ID_L_M,
5211                                                HCLGE_VECTOR_ID_L_S);
5212         req->int_vector_id_h = hnae3_get_field(vector_id,
5213                                                HCLGE_VECTOR_ID_H_M,
5214                                                HCLGE_VECTOR_ID_H_S);
5215
5216         i = 0;
5217         for (node = ring_chain; node; node = node->next) {
5218                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5219                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5220                                 HCLGE_INT_TYPE_S,
5221                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5222                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5223                                 HCLGE_TQP_ID_S, node->tqp_index);
5224                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5225                                 HCLGE_INT_GL_IDX_S,
5226                                 hnae3_get_field(node->int_gl_idx,
5227                                                 HNAE3_RING_GL_IDX_M,
5228                                                 HNAE3_RING_GL_IDX_S));
5229                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5230                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5231                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5232                         req->vfid = vport->vport_id;
5233
5234                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5235                         if (status) {
5236                                 dev_err(&hdev->pdev->dev,
5237                                         "Map TQP fail, status is %d.\n",
5238                                         status);
5239                                 return -EIO;
5240                         }
5241                         i = 0;
5242
5243                         hclge_cmd_setup_basic_desc(&desc,
5244                                                    op,
5245                                                    false);
5246                         req->int_vector_id_l =
5247                                 hnae3_get_field(vector_id,
5248                                                 HCLGE_VECTOR_ID_L_M,
5249                                                 HCLGE_VECTOR_ID_L_S);
5250                         req->int_vector_id_h =
5251                                 hnae3_get_field(vector_id,
5252                                                 HCLGE_VECTOR_ID_H_M,
5253                                                 HCLGE_VECTOR_ID_H_S);
5254                 }
5255         }
5256
5257         if (i > 0) {
5258                 req->int_cause_num = i;
5259                 req->vfid = vport->vport_id;
5260                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5261                 if (status) {
5262                         dev_err(&hdev->pdev->dev,
5263                                 "Map TQP fail, status is %d.\n", status);
5264                         return -EIO;
5265                 }
5266         }
5267
5268         return 0;
5269 }
5270
5271 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5272                                     struct hnae3_ring_chain_node *ring_chain)
5273 {
5274         struct hclge_vport *vport = hclge_get_vport(handle);
5275         struct hclge_dev *hdev = vport->back;
5276         int vector_id;
5277
5278         vector_id = hclge_get_vector_index(hdev, vector);
5279         if (vector_id < 0) {
5280                 dev_err(&hdev->pdev->dev,
5281                         "failed to get vector index. vector=%d\n", vector);
5282                 return vector_id;
5283         }
5284
5285         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5286 }
5287
5288 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5289                                        struct hnae3_ring_chain_node *ring_chain)
5290 {
5291         struct hclge_vport *vport = hclge_get_vport(handle);
5292         struct hclge_dev *hdev = vport->back;
5293         int vector_id, ret;
5294
5295         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5296                 return 0;
5297
5298         vector_id = hclge_get_vector_index(hdev, vector);
5299         if (vector_id < 0) {
5300                 dev_err(&handle->pdev->dev,
5301                         "Get vector index fail. ret =%d\n", vector_id);
5302                 return vector_id;
5303         }
5304
5305         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5306         if (ret)
5307                 dev_err(&handle->pdev->dev,
5308                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5309                         vector_id, ret);
5310
5311         return ret;
5312 }
5313
5314 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5315                                       bool en_uc, bool en_mc, bool en_bc)
5316 {
5317         struct hclge_vport *vport = &hdev->vport[vf_id];
5318         struct hnae3_handle *handle = &vport->nic;
5319         struct hclge_promisc_cfg_cmd *req;
5320         struct hclge_desc desc;
5321         bool uc_tx_en = en_uc;
5322         u8 promisc_cfg = 0;
5323         int ret;
5324
5325         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5326
5327         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5328         req->vf_id = vf_id;
5329
5330         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5331                 uc_tx_en = false;
5332
5333         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5334         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5335         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5336         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5337         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5338         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5339         req->extend_promisc = promisc_cfg;
5340
5341         /* to be compatible with DEVICE_VERSION_V1/2 */
5342         promisc_cfg = 0;
5343         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5344         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5345         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5346         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5347         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5348         req->promisc = promisc_cfg;
5349
5350         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5351         if (ret)
5352                 dev_err(&hdev->pdev->dev,
5353                         "failed to set vport %u promisc mode, ret = %d.\n",
5354                         vf_id, ret);
5355
5356         return ret;
5357 }
5358
5359 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5360                                  bool en_mc_pmc, bool en_bc_pmc)
5361 {
5362         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5363                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5364 }
5365
5366 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5367                                   bool en_mc_pmc)
5368 {
5369         struct hclge_vport *vport = hclge_get_vport(handle);
5370         struct hclge_dev *hdev = vport->back;
5371         bool en_bc_pmc = true;
5372
5373         /* For devices whose version is below V2, if broadcast promisc is
5374          * enabled, the vlan filter is always bypassed. So broadcast promisc
5375          * should be disabled until the user enables promisc mode
5376          */
5377         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5378                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5379
5380         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5381                                             en_bc_pmc);
5382 }
5383
5384 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5385 {
5386         struct hclge_vport *vport = hclge_get_vport(handle);
5387
5388         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5389 }
5390
5391 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5392 {
5393         if (hlist_empty(&hdev->fd_rule_list))
5394                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5395 }
5396
5397 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5398 {
5399         if (!test_bit(location, hdev->fd_bmap)) {
5400                 set_bit(location, hdev->fd_bmap);
5401                 hdev->hclge_fd_rule_num++;
5402         }
5403 }
5404
5405 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5406 {
5407         if (test_bit(location, hdev->fd_bmap)) {
5408                 clear_bit(location, hdev->fd_bmap);
5409                 hdev->hclge_fd_rule_num--;
5410         }
5411 }
5412
5413 static void hclge_fd_free_node(struct hclge_dev *hdev,
5414                                struct hclge_fd_rule *rule)
5415 {
5416         hlist_del(&rule->rule_node);
5417         kfree(rule);
5418         hclge_sync_fd_state(hdev);
5419 }
5420
5421 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5422                                       struct hclge_fd_rule *old_rule,
5423                                       struct hclge_fd_rule *new_rule,
5424                                       enum HCLGE_FD_NODE_STATE state)
5425 {
5426         switch (state) {
5427         case HCLGE_FD_TO_ADD:
5428         case HCLGE_FD_ACTIVE:
5429                 /* 1) if the new state is TO_ADD, just replace the old rule
5430                  * with the same location, no matter its state, because the
5431                  * new rule will be configured to the hardware.
5432                  * 2) if the new state is ACTIVE, it means the new rule
5433                  * has been configured to the hardware, so just replace
5434                  * the old rule node with the same location.
5435                  * 3) in both cases no new node is added to the list, so it is
5436                  * unnecessary to update the rule number and fd_bmap.
5437                  */
5438                 new_rule->rule_node.next = old_rule->rule_node.next;
5439                 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5440                 memcpy(old_rule, new_rule, sizeof(*old_rule));
5441                 kfree(new_rule);
5442                 break;
5443         case HCLGE_FD_DELETED:
5444                 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5445                 hclge_fd_free_node(hdev, old_rule);
5446                 break;
5447         case HCLGE_FD_TO_DEL:
5448                 /* if the new request is TO_DEL and the old rule exists:
5449                  * 1) the state of the old rule is TO_DEL, we need do nothing,
5450                  * because we delete the rule by location and the other rule
5451                  * content is unnecessary.
5452                  * 2) the state of the old rule is ACTIVE, we need to change its
5453                  * state to TO_DEL, so the rule will be deleted when the
5454                  * periodic task is scheduled.
5455                  * 3) the state of the old rule is TO_ADD, it means the rule
5456                  * hasn't been added to hardware yet, so we just delete the
5457                  * rule node from fd_rule_list directly.
5458                  */
5459                 if (old_rule->state == HCLGE_FD_TO_ADD) {
5460                         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5461                         hclge_fd_free_node(hdev, old_rule);
5462                         return;
5463                 }
5464                 old_rule->state = HCLGE_FD_TO_DEL;
5465                 break;
5466         }
5467 }
5468
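/* Look up a rule by location in the location-sorted fd_rule_list. On a miss,
 * *parent is left pointing at the last node with a smaller location, so that
 * hclge_fd_insert_rule_node() can insert a new node and keep the list sorted.
 */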
5469 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5470                                                 u16 location,
5471                                                 struct hclge_fd_rule **parent)
5472 {
5473         struct hclge_fd_rule *rule;
5474         struct hlist_node *node;
5475
5476         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5477                 if (rule->location == location)
5478                         return rule;
5479                 else if (rule->location > location)
5480                         return NULL;
5481                 /* record the parent node, used to keep the nodes in
5482                  * fd_rule_list in ascending order.
5483                  */
5484                 *parent = rule;
5485         }
5486
5487         return NULL;
5488 }
5489
5490 /* insert the fd rule node in ascending order according to rule->location */
5491 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5492                                       struct hclge_fd_rule *rule,
5493                                       struct hclge_fd_rule *parent)
5494 {
5495         INIT_HLIST_NODE(&rule->rule_node);
5496
5497         if (parent)
5498                 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5499         else
5500                 hlist_add_head(&rule->rule_node, hlist);
5501 }
5502
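/* Write the three user-def tuple configurations to hardware: cfg[0], cfg[1]
 * and cfg[2] hold the L2/L3/L4 layer settings and are programmed into the
 * ol2/ol3/ol4 fields respectively. An entry is enabled only while its
 * reference count is non-zero.
 */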
5503 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5504                                      struct hclge_fd_user_def_cfg *cfg)
5505 {
5506         struct hclge_fd_user_def_cfg_cmd *req;
5507         struct hclge_desc desc;
5508         u16 data = 0;
5509         int ret;
5510
5511         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5512
5513         req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5514
5515         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5516         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5517                         HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5518         req->ol2_cfg = cpu_to_le16(data);
5519
5520         data = 0;
5521         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5522         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5523                         HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5524         req->ol3_cfg = cpu_to_le16(data);
5525
5526         data = 0;
5527         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5528         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5529                         HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5530         req->ol4_cfg = cpu_to_le16(data);
5531
5532         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5533         if (ret)
5534                 dev_err(&hdev->pdev->dev,
5535                         "failed to set fd user def data, ret = %d\n", ret);
5536         return ret;
5537 }
5538
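/* Push the user-def configuration to hardware if it has been marked as
 * changed. On failure the CHANGED bit is set again so the write can be
 * retried the next time this function is called.
 */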
5539 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5540 {
5541         int ret;
5542
5543         if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5544                 return;
5545
5546         if (!locked)
5547                 spin_lock_bh(&hdev->fd_rule_lock);
5548
5549         ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5550         if (ret)
5551                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5552
5553         if (!locked)
5554                 spin_unlock_bh(&hdev->fd_rule_lock);
5555 }
5556
5557 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5558                                           struct hclge_fd_rule *rule)
5559 {
5560         struct hlist_head *hlist = &hdev->fd_rule_list;
5561         struct hclge_fd_rule *fd_rule, *parent = NULL;
5562         struct hclge_fd_user_def_info *info, *old_info;
5563         struct hclge_fd_user_def_cfg *cfg;
5564
5565         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5566             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5567                 return 0;
5568
5569         /* valid layers start from 1, so minus 1 is needed to get the cfg index */
5570         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5571         info = &rule->ep.user_def;
5572
5573         if (!cfg->ref_cnt || cfg->offset == info->offset)
5574                 return 0;
5575
5576         if (cfg->ref_cnt > 1)
5577                 goto error;
5578
5579         fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5580         if (fd_rule) {
5581                 old_info = &fd_rule->ep.user_def;
5582                 if (info->layer == old_info->layer)
5583                         return 0;
5584         }
5585
5586 error:
5587         dev_err(&hdev->pdev->dev,
5588                 "No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5589                 info->layer + 1);
5590         return -ENOSPC;
5591 }
5592
5593 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5594                                          struct hclge_fd_rule *rule)
5595 {
5596         struct hclge_fd_user_def_cfg *cfg;
5597
5598         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5599             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5600                 return;
5601
5602         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5603         if (!cfg->ref_cnt) {
5604                 cfg->offset = rule->ep.user_def.offset;
5605                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5606         }
5607         cfg->ref_cnt++;
5608 }
5609
5610 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5611                                          struct hclge_fd_rule *rule)
5612 {
5613         struct hclge_fd_user_def_cfg *cfg;
5614
5615         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5616             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5617                 return;
5618
5619         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5620         if (!cfg->ref_cnt)
5621                 return;
5622
5623         cfg->ref_cnt--;
5624         if (!cfg->ref_cnt) {
5625                 cfg->offset = 0;
5626                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5627         }
5628 }
5629
5630 static void hclge_update_fd_list(struct hclge_dev *hdev,
5631                                  enum HCLGE_FD_NODE_STATE state, u16 location,
5632                                  struct hclge_fd_rule *new_rule)
5633 {
5634         struct hlist_head *hlist = &hdev->fd_rule_list;
5635         struct hclge_fd_rule *fd_rule, *parent = NULL;
5636
5637         fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5638         if (fd_rule) {
5639                 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5640                 if (state == HCLGE_FD_ACTIVE)
5641                         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5642                 hclge_sync_fd_user_def_cfg(hdev, true);
5643
5644                 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5645                 return;
5646         }
5647
5648         /* it's unlikely to fail here, because we have checked that the
5649          * rule exists before.
5650          */
5651         if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5652                 dev_warn(&hdev->pdev->dev,
5653                          "failed to delete fd rule %u, it doesn't exist\n",
5654                          location);
5655                 return;
5656         }
5657
5658         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5659         hclge_sync_fd_user_def_cfg(hdev, true);
5660
5661         hclge_fd_insert_rule_node(hlist, new_rule, parent);
5662         hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5663
5664         if (state == HCLGE_FD_TO_ADD) {
5665                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5666                 hclge_task_schedule(hdev, 0);
5667         }
5668 }
5669
5670 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5671 {
5672         struct hclge_get_fd_mode_cmd *req;
5673         struct hclge_desc desc;
5674         int ret;
5675
5676         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5677
5678         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5679
5680         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5681         if (ret) {
5682                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5683                 return ret;
5684         }
5685
5686         *fd_mode = req->mode;
5687
5688         return ret;
5689 }
5690
5691 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5692                                    u32 *stage1_entry_num,
5693                                    u32 *stage2_entry_num,
5694                                    u16 *stage1_counter_num,
5695                                    u16 *stage2_counter_num)
5696 {
5697         struct hclge_get_fd_allocation_cmd *req;
5698         struct hclge_desc desc;
5699         int ret;
5700
5701         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5702
5703         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5704
5705         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5706         if (ret) {
5707                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5708                         ret);
5709                 return ret;
5710         }
5711
5712         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5713         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5714         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5715         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5716
5717         return ret;
5718 }
5719
5720 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5721                                    enum HCLGE_FD_STAGE stage_num)
5722 {
5723         struct hclge_set_fd_key_config_cmd *req;
5724         struct hclge_fd_key_cfg *stage;
5725         struct hclge_desc desc;
5726         int ret;
5727
5728         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5729
5730         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5731         stage = &hdev->fd_cfg.key_cfg[stage_num];
5732         req->stage = stage_num;
5733         req->key_select = stage->key_sel;
5734         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5735         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5736         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5737         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5738         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5739         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5740
5741         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5742         if (ret)
5743                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5744
5745         return ret;
5746 }
5747
5748 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5749 {
5750         struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5751
5752         spin_lock_bh(&hdev->fd_rule_lock);
5753         memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5754         spin_unlock_bh(&hdev->fd_rule_lock);
5755
5756         hclge_fd_set_user_def_cmd(hdev, cfg);
5757 }
5758
5759 static int hclge_init_fd_config(struct hclge_dev *hdev)
5760 {
5761 #define LOW_2_WORDS             0x03
5762         struct hclge_fd_key_cfg *key_cfg;
5763         int ret;
5764
5765         if (!hnae3_dev_fd_supported(hdev))
5766                 return 0;
5767
5768         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5769         if (ret)
5770                 return ret;
5771
5772         switch (hdev->fd_cfg.fd_mode) {
5773         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5774                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5775                 break;
5776         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5777                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5778                 break;
5779         default:
5780                 dev_err(&hdev->pdev->dev,
5781                         "Unsupported flow director mode %u\n",
5782                         hdev->fd_cfg.fd_mode);
5783                 return -EOPNOTSUPP;
5784         }
5785
5786         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5787         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5788         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5789         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5790         key_cfg->outer_sipv6_word_en = 0;
5791         key_cfg->outer_dipv6_word_en = 0;
5792
5793         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5794                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5795                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5796                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5797
5798         /* If the max 400-bit key is used, we can also support MAC address tuples */
5799         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5800                 key_cfg->tuple_active |=
5801                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5802                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5803                         key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5804         }
5805
5806         /* roce_type is used to filter roce frames
5807          * dst_vport is used to specify the rule
5808          */
5809         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5810
5811         ret = hclge_get_fd_allocation(hdev,
5812                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5813                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5814                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5815                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5816         if (ret)
5817                 return ret;
5818
5819         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5820 }
5821
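/* Program one TCAM entry. The key is split across three chained descriptors
 * (the tcam_data fields of req1/req2/req3); sel_x selects whether the x or
 * the y half of the key is written, and is_add controls whether the entry is
 * marked valid (applied when writing the x half).
 */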
5822 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5823                                 int loc, u8 *key, bool is_add)
5824 {
5825         struct hclge_fd_tcam_config_1_cmd *req1;
5826         struct hclge_fd_tcam_config_2_cmd *req2;
5827         struct hclge_fd_tcam_config_3_cmd *req3;
5828         struct hclge_desc desc[3];
5829         int ret;
5830
5831         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5832         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5833         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5834         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5835         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5836
5837         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5838         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5839         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5840
5841         req1->stage = stage;
5842         req1->xy_sel = sel_x ? 1 : 0;
5843         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5844         req1->index = cpu_to_le32(loc);
5845         req1->entry_vld = sel_x ? is_add : 0;
5846
5847         if (key) {
5848                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5849                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5850                        sizeof(req2->tcam_data));
5851                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5852                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5853         }
5854
5855         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5856         if (ret)
5857                 dev_err(&hdev->pdev->dev,
5858                         "config tcam key fail, ret=%d\n",
5859                         ret);
5860
5861         return ret;
5862 }
5863
5864 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5865                               struct hclge_fd_ad_data *action)
5866 {
5867         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5868         struct hclge_fd_ad_config_cmd *req;
5869         struct hclge_desc desc;
5870         u64 ad_data = 0;
5871         int ret;
5872
5873         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5874
5875         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5876         req->index = cpu_to_le32(loc);
5877         req->stage = stage;
5878
5879         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5880                       action->write_rule_id_to_bd);
5881         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5882                         action->rule_id);
5883         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5884                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5885                               action->override_tc);
5886                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5887                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5888         }
5889         ad_data <<= 32;
5890         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5891         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5892                       action->forward_to_direct_queue);
5893         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5894                         action->queue_id);
5895         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5896         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5897                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5898         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5899         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5900                         action->counter_id);
5901
5902         req->ad_data = cpu_to_le64(ad_data);
5903         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5904         if (ret)
5905                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5906
5907         return ret;
5908 }
5909
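/* Convert one tuple of the rule into the x/y key format used by the TCAM,
 * via the calc_x()/calc_y() helpers (defined elsewhere in this driver).
 * Returns true if the tuple was handled (including unused tuples, which keep
 * their zero-initialized key bytes), false for an unknown key option.
 */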
5910 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5911                                    struct hclge_fd_rule *rule)
5912 {
5913         int offset, moffset, ip_offset;
5914         enum HCLGE_FD_KEY_OPT key_opt;
5915         u16 tmp_x_s, tmp_y_s;
5916         u32 tmp_x_l, tmp_y_l;
5917         u8 *p = (u8 *)rule;
5918         int i;
5919
5920         if (rule->unused_tuple & BIT(tuple_bit))
5921                 return true;
5922
5923         key_opt = tuple_key_info[tuple_bit].key_opt;
5924         offset = tuple_key_info[tuple_bit].offset;
5925         moffset = tuple_key_info[tuple_bit].moffset;
5926
5927         switch (key_opt) {
5928         case KEY_OPT_U8:
5929                 calc_x(*key_x, p[offset], p[moffset]);
5930                 calc_y(*key_y, p[offset], p[moffset]);
5931
5932                 return true;
5933         case KEY_OPT_LE16:
5934                 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5935                 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5936                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5937                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5938
5939                 return true;
5940         case KEY_OPT_LE32:
5941                 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5942                 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5943                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5944                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5945
5946                 return true;
5947         case KEY_OPT_MAC:
5948                 for (i = 0; i < ETH_ALEN; i++) {
5949                         calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5950                                p[moffset + i]);
5951                         calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5952                                p[moffset + i]);
5953                 }
5954
5955                 return true;
5956         case KEY_OPT_IP:
5957                 ip_offset = IPV4_INDEX * sizeof(u32);
5958                 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5959                        *(u32 *)(&p[moffset + ip_offset]));
5960                 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5961                        *(u32 *)(&p[moffset + ip_offset]));
5962                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5963                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5964
5965                 return true;
5966         default:
5967                 return false;
5968         }
5969 }
5970
5971 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5972                                  u8 vf_id, u8 network_port_id)
5973 {
5974         u32 port_number = 0;
5975
5976         if (port_type == HOST_PORT) {
5977                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5978                                 pf_id);
5979                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5980                                 vf_id);
5981                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5982         } else {
5983                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5984                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5985                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5986         }
5987
5988         return port_number;
5989 }
5990
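/* Pack the active meta data fields (ROCE type and destination vport) into a
 * 32-bit word, convert it to x/y key format and left-align the result so the
 * meta data ends up in the MSB region of the key; unused bits stay zero.
 */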
5991 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5992                                        __le32 *key_x, __le32 *key_y,
5993                                        struct hclge_fd_rule *rule)
5994 {
5995         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5996         u8 cur_pos = 0, tuple_size, shift_bits;
5997         unsigned int i;
5998
5999         for (i = 0; i < MAX_META_DATA; i++) {
6000                 tuple_size = meta_data_key_info[i].key_length;
6001                 tuple_bit = key_cfg->meta_data_active & BIT(i);
6002
6003                 switch (tuple_bit) {
6004                 case BIT(ROCE_TYPE):
6005                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
6006                         cur_pos += tuple_size;
6007                         break;
6008                 case BIT(DST_VPORT):
6009                         port_number = hclge_get_port_number(HOST_PORT, 0,
6010                                                             rule->vf_id, 0);
6011                         hnae3_set_field(meta_data,
6012                                         GENMASK(cur_pos + tuple_size, cur_pos),
6013                                         cur_pos, port_number);
6014                         cur_pos += tuple_size;
6015                         break;
6016                 default:
6017                         break;
6018                 }
6019         }
6020
6021         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
6022         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
6023         shift_bits = sizeof(meta_data) * 8 - cur_pos;
6024
6025         *key_x = cpu_to_le32(tmp_x << shift_bits);
6026         *key_y = cpu_to_le32(tmp_y << shift_bits);
6027 }
6028
6029 /* A complete key consists of a meta data key and a tuple key.
6030  * The meta data key is stored in the MSB region, the tuple key is stored
6031  * in the LSB region, and unused bits are filled with 0.
6032  */
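/* For example, with the 400-bit key (MAX_KEY_LENGTH / 8 = 50 bytes) and a
 * 32-bit meta data region, the tuple key would occupy bytes 0..45 and the
 * meta data key bytes 46..49 (a rough sketch; the exact sizes come from
 * hdev->fd_cfg.max_key_length and MAX_META_DATA_LENGTH).
 */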
6033 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
6034                             struct hclge_fd_rule *rule)
6035 {
6036         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
6037         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
6038         u8 *cur_key_x, *cur_key_y;
6039         u8 meta_data_region;
6040         u8 tuple_size;
6041         int ret;
6042         u32 i;
6043
6044         memset(key_x, 0, sizeof(key_x));
6045         memset(key_y, 0, sizeof(key_y));
6046         cur_key_x = key_x;
6047         cur_key_y = key_y;
6048
6049         for (i = 0; i < MAX_TUPLE; i++) {
6050                 bool tuple_valid;
6051
6052                 tuple_size = tuple_key_info[i].key_length / 8;
6053                 if (!(key_cfg->tuple_active & BIT(i)))
6054                         continue;
6055
6056                 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
6057                                                      cur_key_y, rule);
6058                 if (tuple_valid) {
6059                         cur_key_x += tuple_size;
6060                         cur_key_y += tuple_size;
6061                 }
6062         }
6063
6064         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
6065                         MAX_META_DATA_LENGTH / 8;
6066
6067         hclge_fd_convert_meta_data(key_cfg,
6068                                    (__le32 *)(key_x + meta_data_region),
6069                                    (__le32 *)(key_y + meta_data_region),
6070                                    rule);
6071
6072         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
6073                                    true);
6074         if (ret) {
6075                 dev_err(&hdev->pdev->dev,
6076                         "fd key_y config fail, loc=%u, ret=%d\n",
6077                         rule->location, ret);
6078                 return ret;
6079         }
6080
6081         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6082                                    true);
6083         if (ret)
6084                 dev_err(&hdev->pdev->dev,
6085                         "fd key_x config fail, loc=%u, ret=%d\n",
6086                         rule->location, ret);
6087         return ret;
6088 }
6089
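/* Build the action data for a rule: drop the packet, override the TC (the
 * queue id and tc_size are derived from the TC's tqp offset and count), or
 * forward to a specific queue. When stage 1 counters are available, a counter
 * is assigned based on the rule's vf_id.
 */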
6090 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6091                                struct hclge_fd_rule *rule)
6092 {
6093         struct hclge_vport *vport = hdev->vport;
6094         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6095         struct hclge_fd_ad_data ad_data;
6096
6097         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6098         ad_data.ad_id = rule->location;
6099
6100         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6101                 ad_data.drop_packet = true;
6102         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6103                 ad_data.override_tc = true;
6104                 ad_data.queue_id =
6105                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6106                 ad_data.tc_size =
6107                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6108         } else {
6109                 ad_data.forward_to_direct_queue = true;
6110                 ad_data.queue_id = rule->queue_id;
6111         }
6112
6113         if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6114                 ad_data.use_counter = true;
6115                 ad_data.counter_id = rule->vf_id %
6116                                      hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6117         } else {
6118                 ad_data.use_counter = false;
6119                 ad_data.counter_id = 0;
6120         }
6121
6122         ad_data.use_next_stage = false;
6123         ad_data.next_input_key = 0;
6124
6125         ad_data.write_rule_id_to_bd = true;
6126         ad_data.rule_id = rule->location;
6127
6128         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6129 }
6130
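/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and set a bit in *unused_tuple for every field the spec does not use, so
 * that hclge_fd_convert_tuple() can skip those tuples when building the key.
 */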
6131 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6132                                        u32 *unused_tuple)
6133 {
6134         if (!spec || !unused_tuple)
6135                 return -EINVAL;
6136
6137         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6138
6139         if (!spec->ip4src)
6140                 *unused_tuple |= BIT(INNER_SRC_IP);
6141
6142         if (!spec->ip4dst)
6143                 *unused_tuple |= BIT(INNER_DST_IP);
6144
6145         if (!spec->psrc)
6146                 *unused_tuple |= BIT(INNER_SRC_PORT);
6147
6148         if (!spec->pdst)
6149                 *unused_tuple |= BIT(INNER_DST_PORT);
6150
6151         if (!spec->tos)
6152                 *unused_tuple |= BIT(INNER_IP_TOS);
6153
6154         return 0;
6155 }
6156
6157 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6158                                     u32 *unused_tuple)
6159 {
6160         if (!spec || !unused_tuple)
6161                 return -EINVAL;
6162
6163         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6164                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6165
6166         if (!spec->ip4src)
6167                 *unused_tuple |= BIT(INNER_SRC_IP);
6168
6169         if (!spec->ip4dst)
6170                 *unused_tuple |= BIT(INNER_DST_IP);
6171
6172         if (!spec->tos)
6173                 *unused_tuple |= BIT(INNER_IP_TOS);
6174
6175         if (!spec->proto)
6176                 *unused_tuple |= BIT(INNER_IP_PROTO);
6177
6178         if (spec->l4_4_bytes)
6179                 return -EOPNOTSUPP;
6180
6181         if (spec->ip_ver != ETH_RX_NFC_IP4)
6182                 return -EOPNOTSUPP;
6183
6184         return 0;
6185 }
6186
6187 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6188                                        u32 *unused_tuple)
6189 {
6190         if (!spec || !unused_tuple)
6191                 return -EINVAL;
6192
6193         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6194
6195         /* check whether the src/dst ip addresses are used */
6196         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6197                 *unused_tuple |= BIT(INNER_SRC_IP);
6198
6199         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6200                 *unused_tuple |= BIT(INNER_DST_IP);
6201
6202         if (!spec->psrc)
6203                 *unused_tuple |= BIT(INNER_SRC_PORT);
6204
6205         if (!spec->pdst)
6206                 *unused_tuple |= BIT(INNER_DST_PORT);
6207
6208         if (!spec->tclass)
6209                 *unused_tuple |= BIT(INNER_IP_TOS);
6210
6211         return 0;
6212 }
6213
6214 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6215                                     u32 *unused_tuple)
6216 {
6217         if (!spec || !unused_tuple)
6218                 return -EINVAL;
6219
6220         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6221                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6222
6223         /* check whether the src/dst ip addresses are used */
6224         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6225                 *unused_tuple |= BIT(INNER_SRC_IP);
6226
6227         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6228                 *unused_tuple |= BIT(INNER_DST_IP);
6229
6230         if (!spec->l4_proto)
6231                 *unused_tuple |= BIT(INNER_IP_PROTO);
6232
6233         if (!spec->tclass)
6234                 *unused_tuple |= BIT(INNER_IP_TOS);
6235
6236         if (spec->l4_4_bytes)
6237                 return -EOPNOTSUPP;
6238
6239         return 0;
6240 }
6241
6242 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6243 {
6244         if (!spec || !unused_tuple)
6245                 return -EINVAL;
6246
6247         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6248                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6249                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6250
6251         if (is_zero_ether_addr(spec->h_source))
6252                 *unused_tuple |= BIT(INNER_SRC_MAC);
6253
6254         if (is_zero_ether_addr(spec->h_dest))
6255                 *unused_tuple |= BIT(INNER_DST_MAC);
6256
6257         if (!spec->h_proto)
6258                 *unused_tuple |= BIT(INNER_ETH_TYPE);
6259
6260         return 0;
6261 }
6262
6263 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6264                                     struct ethtool_rx_flow_spec *fs,
6265                                     u32 *unused_tuple)
6266 {
6267         if (fs->flow_type & FLOW_EXT) {
6268                 if (fs->h_ext.vlan_etype) {
6269                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6270                         return -EOPNOTSUPP;
6271                 }
6272
6273                 if (!fs->h_ext.vlan_tci)
6274                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6275
6276                 if (fs->m_ext.vlan_tci &&
6277                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6278                         dev_err(&hdev->pdev->dev,
6279                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6280                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6281                         return -EINVAL;
6282                 }
6283         } else {
6284                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6285         }
6286
6287         if (fs->flow_type & FLOW_MAC_EXT) {
6288                 if (hdev->fd_cfg.fd_mode !=
6289                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6290                         dev_err(&hdev->pdev->dev,
6291                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6292                         return -EOPNOTSUPP;
6293                 }
6294
6295                 if (is_zero_ether_addr(fs->h_ext.h_dest))
6296                         *unused_tuple |= BIT(INNER_DST_MAC);
6297                 else
6298                         *unused_tuple &= ~BIT(INNER_DST_MAC);
6299         }
6300
6301         return 0;
6302 }
6303
6304 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6305                                        struct hclge_fd_user_def_info *info)
6306 {
6307         switch (flow_type) {
6308         case ETHER_FLOW:
6309                 info->layer = HCLGE_FD_USER_DEF_L2;
6310                 *unused_tuple &= ~BIT(INNER_L2_RSV);
6311                 break;
6312         case IP_USER_FLOW:
6313         case IPV6_USER_FLOW:
6314                 info->layer = HCLGE_FD_USER_DEF_L3;
6315                 *unused_tuple &= ~BIT(INNER_L3_RSV);
6316                 break;
6317         case TCP_V4_FLOW:
6318         case UDP_V4_FLOW:
6319         case TCP_V6_FLOW:
6320         case UDP_V6_FLOW:
6321                 info->layer = HCLGE_FD_USER_DEF_L4;
6322                 *unused_tuple &= ~BIT(INNER_L4_RSV);
6323                 break;
6324         default:
6325                 return -EOPNOTSUPP;
6326         }
6327
6328         return 0;
6329 }
6330
6331 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6332 {
6333         return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6334 }
6335
6336 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6337                                          struct ethtool_rx_flow_spec *fs,
6338                                          u32 *unused_tuple,
6339                                          struct hclge_fd_user_def_info *info)
6340 {
6341         u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6342         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6343         u16 data, offset, data_mask, offset_mask;
6344         int ret;
6345
6346         info->layer = HCLGE_FD_USER_DEF_NONE;
6347         *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6348
6349         if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6350                 return 0;
6351
6352         /* the user-def data from ethtool is a 64-bit value, bits 0~15 are
6353          * used for data, and bits 32~47 are used for offset.
6354          */
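        /* For example (a sketch of the layout above): matching data 0x1234 at
         * offset 4 corresponds to a 64-bit user-def value of
         * 0x0000000400001234, i.e. h_ext.data[0] = 4 and h_ext.data[1] =
         * 0x1234 in big-endian form.
         */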
6355         data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6356         data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6357         offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6358         offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6359
6360         if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6361                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6362                 return -EOPNOTSUPP;
6363         }
6364
6365         if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6366                 dev_err(&hdev->pdev->dev,
6367                         "user-def offset[%u] should be no more than %u\n",
6368                         offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6369                 return -EINVAL;
6370         }
6371
6372         if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6373                 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6374                 return -EINVAL;
6375         }
6376
6377         ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6378         if (ret) {
6379                 dev_err(&hdev->pdev->dev,
6380                         "unsupported flow type for user-def bytes, ret = %d\n",
6381                         ret);
6382                 return ret;
6383         }
6384
6385         info->data = data;
6386         info->data_mask = data_mask;
6387         info->offset = offset;
6388
6389         return 0;
6390 }
6391
6392 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6393                                struct ethtool_rx_flow_spec *fs,
6394                                u32 *unused_tuple,
6395                                struct hclge_fd_user_def_info *info)
6396 {
6397         u32 flow_type;
6398         int ret;
6399
6400         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6401                 dev_err(&hdev->pdev->dev,
6402                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
6403                         fs->location,
6404                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6405                 return -EINVAL;
6406         }
6407
6408         ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6409         if (ret)
6410                 return ret;
6411
6412         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6413         switch (flow_type) {
6414         case SCTP_V4_FLOW:
6415         case TCP_V4_FLOW:
6416         case UDP_V4_FLOW:
6417                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6418                                                   unused_tuple);
6419                 break;
6420         case IP_USER_FLOW:
6421                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6422                                                unused_tuple);
6423                 break;
6424         case SCTP_V6_FLOW:
6425         case TCP_V6_FLOW:
6426         case UDP_V6_FLOW:
6427                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6428                                                   unused_tuple);
6429                 break;
6430         case IPV6_USER_FLOW:
6431                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6432                                                unused_tuple);
6433                 break;
6434         case ETHER_FLOW:
6435                 if (hdev->fd_cfg.fd_mode !=
6436                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6437                         dev_err(&hdev->pdev->dev,
6438                                 "ETHER_FLOW is not supported in current fd mode!\n");
6439                         return -EOPNOTSUPP;
6440                 }
6441
6442                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6443                                                  unused_tuple);
6444                 break;
6445         default:
6446                 dev_err(&hdev->pdev->dev,
6447                         "unsupported protocol type, protocol type = %#x\n",
6448                         flow_type);
6449                 return -EOPNOTSUPP;
6450         }
6451
6452         if (ret) {
6453                 dev_err(&hdev->pdev->dev,
6454                         "failed to check flow union tuple, ret = %d\n",
6455                         ret);
6456                 return ret;
6457         }
6458
6459         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6460 }
6461
6462 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6463                                       struct ethtool_rx_flow_spec *fs,
6464                                       struct hclge_fd_rule *rule, u8 ip_proto)
6465 {
6466         rule->tuples.src_ip[IPV4_INDEX] =
6467                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6468         rule->tuples_mask.src_ip[IPV4_INDEX] =
6469                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6470
6471         rule->tuples.dst_ip[IPV4_INDEX] =
6472                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6473         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6474                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6475
6476         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6477         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6478
6479         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6480         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6481
6482         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6483         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6484
6485         rule->tuples.ether_proto = ETH_P_IP;
6486         rule->tuples_mask.ether_proto = 0xFFFF;
6487
6488         rule->tuples.ip_proto = ip_proto;
6489         rule->tuples_mask.ip_proto = 0xFF;
6490 }
6491
6492 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6493                                    struct ethtool_rx_flow_spec *fs,
6494                                    struct hclge_fd_rule *rule)
6495 {
6496         rule->tuples.src_ip[IPV4_INDEX] =
6497                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6498         rule->tuples_mask.src_ip[IPV4_INDEX] =
6499                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6500
6501         rule->tuples.dst_ip[IPV4_INDEX] =
6502                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6503         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6504                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6505
6506         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6507         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6508
6509         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6510         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6511
6512         rule->tuples.ether_proto = ETH_P_IP;
6513         rule->tuples_mask.ether_proto = 0xFFFF;
6514 }
6515
6516 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6517                                       struct ethtool_rx_flow_spec *fs,
6518                                       struct hclge_fd_rule *rule, u8 ip_proto)
6519 {
6520         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6521                           IPV6_SIZE);
6522         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6523                           IPV6_SIZE);
6524
6525         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6526                           IPV6_SIZE);
6527         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6528                           IPV6_SIZE);
6529
6530         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6531         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6532
6533         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6534         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6535
6536         rule->tuples.ether_proto = ETH_P_IPV6;
6537         rule->tuples_mask.ether_proto = 0xFFFF;
6538
6539         rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6540         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6541
6542         rule->tuples.ip_proto = ip_proto;
6543         rule->tuples_mask.ip_proto = 0xFF;
6544 }
6545
6546 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6547                                    struct ethtool_rx_flow_spec *fs,
6548                                    struct hclge_fd_rule *rule)
6549 {
6550         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6551                           IPV6_SIZE);
6552         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6553                           IPV6_SIZE);
6554
6555         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6556                           IPV6_SIZE);
6557         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6558                           IPV6_SIZE);
6559
6560         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6561         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6562
6563         rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6564         rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6565
6566         rule->tuples.ether_proto = ETH_P_IPV6;
6567         rule->tuples_mask.ether_proto = 0xFFFF;
6568 }
6569
6570 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6571                                      struct ethtool_rx_flow_spec *fs,
6572                                      struct hclge_fd_rule *rule)
6573 {
6574         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6575         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6576
6577         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6578         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6579
6580         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6581         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6582 }
6583
6584 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6585                                         struct hclge_fd_rule *rule)
6586 {
6587         switch (info->layer) {
6588         case HCLGE_FD_USER_DEF_L2:
6589                 rule->tuples.l2_user_def = info->data;
6590                 rule->tuples_mask.l2_user_def = info->data_mask;
6591                 break;
6592         case HCLGE_FD_USER_DEF_L3:
6593                 rule->tuples.l3_user_def = info->data;
6594                 rule->tuples_mask.l3_user_def = info->data_mask;
6595                 break;
6596         case HCLGE_FD_USER_DEF_L4:
6597                 rule->tuples.l4_user_def = (u32)info->data << 16;
6598                 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6599                 break;
6600         default:
6601                 break;
6602         }
6603
6604         rule->ep.user_def = *info;
6605 }
6606
6607 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6608                               struct ethtool_rx_flow_spec *fs,
6609                               struct hclge_fd_rule *rule,
6610                               struct hclge_fd_user_def_info *info)
6611 {
6612         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6613
6614         switch (flow_type) {
6615         case SCTP_V4_FLOW:
6616                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6617                 break;
6618         case TCP_V4_FLOW:
6619                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6620                 break;
6621         case UDP_V4_FLOW:
6622                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6623                 break;
6624         case IP_USER_FLOW:
6625                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6626                 break;
6627         case SCTP_V6_FLOW:
6628                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6629                 break;
6630         case TCP_V6_FLOW:
6631                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6632                 break;
6633         case UDP_V6_FLOW:
6634                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6635                 break;
6636         case IPV6_USER_FLOW:
6637                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6638                 break;
6639         case ETHER_FLOW:
6640                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6641                 break;
6642         default:
6643                 return -EOPNOTSUPP;
6644         }
6645
6646         if (fs->flow_type & FLOW_EXT) {
6647                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6648                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6649                 hclge_fd_get_user_def_tuple(info, rule);
6650         }
6651
6652         if (fs->flow_type & FLOW_MAC_EXT) {
6653                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6654                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6655         }
6656
6657         return 0;
6658 }
6659
6660 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6661                                 struct hclge_fd_rule *rule)
6662 {
6663         int ret;
6664
6665         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6666         if (ret)
6667                 return ret;
6668
6669         return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6670 }
6671
6672 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6673                                      struct hclge_fd_rule *rule)
6674 {
6675         int ret;
6676
6677         spin_lock_bh(&hdev->fd_rule_lock);
6678
6679         if (hdev->fd_active_type != rule->rule_type &&
6680             (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6681              hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6682                 dev_err(&hdev->pdev->dev,
6683                         "mode conflict (new type %d, active type %d), please delete existing rules first\n",
6684                         rule->rule_type, hdev->fd_active_type);
6685                 spin_unlock_bh(&hdev->fd_rule_lock);
6686                 return -EINVAL;
6687         }
6688
6689         ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6690         if (ret)
6691                 goto out;
6692
6693         ret = hclge_clear_arfs_rules(hdev);
6694         if (ret)
6695                 goto out;
6696
6697         ret = hclge_fd_config_rule(hdev, rule);
6698         if (ret)
6699                 goto out;
6700
6701         rule->state = HCLGE_FD_ACTIVE;
6702         hdev->fd_active_type = rule->rule_type;
6703         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6704
6705 out:
6706         spin_unlock_bh(&hdev->fd_rule_lock);
6707         return ret;
6708 }
6709
6710 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6711 {
6712         struct hclge_vport *vport = hclge_get_vport(handle);
6713         struct hclge_dev *hdev = vport->back;
6714
6715         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6716 }
6717
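/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC requests a drop action;
 * otherwise the cookie encodes a VF id (0 means the PF itself) and a queue
 * id, which are validated against the requested VF count and tqp number.
 */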
6718 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6719                                       u16 *vport_id, u8 *action, u16 *queue_id)
6720 {
6721         struct hclge_vport *vport = hdev->vport;
6722
6723         if (ring_cookie == RX_CLS_FLOW_DISC) {
6724                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6725         } else {
6726                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6727                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6728                 u16 tqps;
6729
6730                 /* To keep consistent with the user's configuration, subtract 1
6731                  * when printing 'vf', because ethtool adds 1 to the vf id.
6732                  */
6733                 if (vf > hdev->num_req_vfs) {
6734                         dev_err(&hdev->pdev->dev,
6735                                 "Error: vf id (%u) should be less than %u\n",
6736                                 vf - 1, hdev->num_req_vfs);
6737                         return -EINVAL;
6738                 }
6739
6740                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6741                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6742
6743                 if (ring >= tqps) {
6744                         dev_err(&hdev->pdev->dev,
6745                                 "Error: queue id (%u) > max queue id (%u)\n",
6746                                 ring, tqps - 1);
6747                         return -EINVAL;
6748                 }
6749
6750                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6751                 *queue_id = ring;
6752         }
6753
6754         return 0;
6755 }
6756
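/* Entry point for ethtool n-tuple rule insertion, e.g. (device name and
 * addresses are just illustrative):
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 3 loc 1
 * which adds an EP (ethtool) type rule steering matching packets to queue 3.
 */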
6757 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6758                               struct ethtool_rxnfc *cmd)
6759 {
6760         struct hclge_vport *vport = hclge_get_vport(handle);
6761         struct hclge_dev *hdev = vport->back;
6762         struct hclge_fd_user_def_info info;
6763         u16 dst_vport_id = 0, q_index = 0;
6764         struct ethtool_rx_flow_spec *fs;
6765         struct hclge_fd_rule *rule;
6766         u32 unused = 0;
6767         u8 action;
6768         int ret;
6769
6770         if (!hnae3_dev_fd_supported(hdev)) {
6771                 dev_err(&hdev->pdev->dev,
6772                         "flow director is not supported\n");
6773                 return -EOPNOTSUPP;
6774         }
6775
6776         if (!hdev->fd_en) {
6777                 dev_err(&hdev->pdev->dev,
6778                         "please enable flow director first\n");
6779                 return -EOPNOTSUPP;
6780         }
6781
6782         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6783
6784         ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6785         if (ret)
6786                 return ret;
6787
6788         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6789                                          &action, &q_index);
6790         if (ret)
6791                 return ret;
6792
6793         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6794         if (!rule)
6795                 return -ENOMEM;
6796
6797         ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6798         if (ret) {
6799                 kfree(rule);
6800                 return ret;
6801         }
6802
6803         rule->flow_type = fs->flow_type;
6804         rule->location = fs->location;
6805         rule->unused_tuple = unused;
6806         rule->vf_id = dst_vport_id;
6807         rule->queue_id = q_index;
6808         rule->action = action;
6809         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6810
6811         ret = hclge_add_fd_entry_common(hdev, rule);
6812         if (ret)
6813                 kfree(rule);
6814
6815         return ret;
6816 }
6817
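/* Delete a flow director rule by its location, e.g. (illustration only)
 * "ethtool -N eth0 delete 1". Rules installed through tc flower cannot be
 * removed through this path.
 */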
6818 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6819                               struct ethtool_rxnfc *cmd)
6820 {
6821         struct hclge_vport *vport = hclge_get_vport(handle);
6822         struct hclge_dev *hdev = vport->back;
6823         struct ethtool_rx_flow_spec *fs;
6824         int ret;
6825
6826         if (!hnae3_dev_fd_supported(hdev))
6827                 return -EOPNOTSUPP;
6828
6829         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6830
6831         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6832                 return -EINVAL;
6833
6834         spin_lock_bh(&hdev->fd_rule_lock);
6835         if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6836             !test_bit(fs->location, hdev->fd_bmap)) {
6837                 dev_err(&hdev->pdev->dev,
6838                         "Delete fail, rule %u is inexistent\n", fs->location);
6839                 spin_unlock_bh(&hdev->fd_rule_lock);
6840                 return -ENOENT;
6841         }
6842
6843         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6844                                    NULL, false);
6845         if (ret)
6846                 goto out;
6847
6848         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6849
6850 out:
6851         spin_unlock_bh(&hdev->fd_rule_lock);
6852         return ret;
6853 }
6854
6855 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6856                                          bool clear_list)
6857 {
6858         struct hclge_fd_rule *rule;
6859         struct hlist_node *node;
6860         u16 location;
6861
6862         if (!hnae3_dev_fd_supported(hdev))
6863                 return;
6864
6865         spin_lock_bh(&hdev->fd_rule_lock);
6866
6867         for_each_set_bit(location, hdev->fd_bmap,
6868                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6869                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6870                                      NULL, false);
6871
6872         if (clear_list) {
6873                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6874                                           rule_node) {
6875                         hlist_del(&rule->rule_node);
6876                         kfree(rule);
6877                 }
6878                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6879                 hdev->hclge_fd_rule_num = 0;
6880                 bitmap_zero(hdev->fd_bmap,
6881                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6882         }
6883
6884         spin_unlock_bh(&hdev->fd_rule_lock);
6885 }
6886
6887 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6888 {
6889         hclge_clear_fd_rules_in_list(hdev, true);
6890         hclge_fd_disable_user_def(hdev);
6891 }
6892
6893 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6894 {
6895         struct hclge_vport *vport = hclge_get_vport(handle);
6896         struct hclge_dev *hdev = vport->back;
6897         struct hclge_fd_rule *rule;
6898         struct hlist_node *node;
6899
6900         /* Return ok here, because the reset error handling will check this
6901          * return value. If an error is returned here, the reset process
6902          * will fail.
6903          */
6904         if (!hnae3_dev_fd_supported(hdev))
6905                 return 0;
6906
6907         /* if fd is disabled, the rules should not be restored during reset */
6908         if (!hdev->fd_en)
6909                 return 0;
6910
6911         spin_lock_bh(&hdev->fd_rule_lock);
6912         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6913                 if (rule->state == HCLGE_FD_ACTIVE)
6914                         rule->state = HCLGE_FD_TO_ADD;
6915         }
6916         spin_unlock_bh(&hdev->fd_rule_lock);
6917         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6918
6919         return 0;
6920 }
6921
6922 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6923                                  struct ethtool_rxnfc *cmd)
6924 {
6925         struct hclge_vport *vport = hclge_get_vport(handle);
6926         struct hclge_dev *hdev = vport->back;
6927
6928         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6929                 return -EOPNOTSUPP;
6930
6931         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6932         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6933
6934         return 0;
6935 }
6936
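/* The hclge_fd_get_*_info() helpers below convert a stored rule back into
 * the ethtool spec/mask representation. A tuple flagged in unused_tuple is
 * reported with an all-zero mask, i.e. as a wildcard.
 */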
6937 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6938                                      struct ethtool_tcpip4_spec *spec,
6939                                      struct ethtool_tcpip4_spec *spec_mask)
6940 {
6941         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6942         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6943                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6944
6945         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6946         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6947                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6948
6949         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6950         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6951                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6952
6953         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6954         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6955                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6956
6957         spec->tos = rule->tuples.ip_tos;
6958         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6959                         0 : rule->tuples_mask.ip_tos;
6960 }
6961
6962 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6963                                   struct ethtool_usrip4_spec *spec,
6964                                   struct ethtool_usrip4_spec *spec_mask)
6965 {
6966         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6967         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6968                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6969
6970         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6971         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6972                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6973
6974         spec->tos = rule->tuples.ip_tos;
6975         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6976                         0 : rule->tuples_mask.ip_tos;
6977
6978         spec->proto = rule->tuples.ip_proto;
6979         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6980                         0 : rule->tuples_mask.ip_proto;
6981
6982         spec->ip_ver = ETH_RX_NFC_IP4;
6983 }
6984
6985 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6986                                      struct ethtool_tcpip6_spec *spec,
6987                                      struct ethtool_tcpip6_spec *spec_mask)
6988 {
6989         cpu_to_be32_array(spec->ip6src,
6990                           rule->tuples.src_ip, IPV6_SIZE);
6991         cpu_to_be32_array(spec->ip6dst,
6992                           rule->tuples.dst_ip, IPV6_SIZE);
6993         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6994                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6995         else
6996                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6997                                   IPV6_SIZE);
6998
6999         if (rule->unused_tuple & BIT(INNER_DST_IP))
7000                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7001         else
7002                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
7003                                   IPV6_SIZE);
7004
7005         spec->tclass = rule->tuples.ip_tos;
7006         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7007                         0 : rule->tuples_mask.ip_tos;
7008
7009         spec->psrc = cpu_to_be16(rule->tuples.src_port);
7010         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
7011                         0 : cpu_to_be16(rule->tuples_mask.src_port);
7012
7013         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
7014         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
7015                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
7016 }
7017
7018 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
7019                                   struct ethtool_usrip6_spec *spec,
7020                                   struct ethtool_usrip6_spec *spec_mask)
7021 {
7022         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
7023         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
7024         if (rule->unused_tuple & BIT(INNER_SRC_IP))
7025                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
7026         else
7027                 cpu_to_be32_array(spec_mask->ip6src,
7028                                   rule->tuples_mask.src_ip, IPV6_SIZE);
7029
7030         if (rule->unused_tuple & BIT(INNER_DST_IP))
7031                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
7032         else
7033                 cpu_to_be32_array(spec_mask->ip6dst,
7034                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
7035
7036         spec->tclass = rule->tuples.ip_tos;
7037         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
7038                         0 : rule->tuples_mask.ip_tos;
7039
7040         spec->l4_proto = rule->tuples.ip_proto;
7041         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
7042                         0 : rule->tuples_mask.ip_proto;
7043 }
7044
7045 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
7046                                     struct ethhdr *spec,
7047                                     struct ethhdr *spec_mask)
7048 {
7049         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
7050         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
7051
7052         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
7053                 eth_zero_addr(spec_mask->h_source);
7054         else
7055                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
7056
7057         if (rule->unused_tuple & BIT(INNER_DST_MAC))
7058                 eth_zero_addr(spec_mask->h_dest);
7059         else
7060                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
7061
7062         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
7063         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
7064                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
7065 }
7066
7067 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
7068                                        struct hclge_fd_rule *rule)
7069 {
7070         if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
7071             HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
7072                 fs->h_ext.data[0] = 0;
7073                 fs->h_ext.data[1] = 0;
7074                 fs->m_ext.data[0] = 0;
7075                 fs->m_ext.data[1] = 0;
7076         } else {
7077                 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
7078                 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
7079                 fs->m_ext.data[0] =
7080                                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
7081                 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7082         }
7083 }
7084
7085 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7086                                   struct hclge_fd_rule *rule)
7087 {
7088         if (fs->flow_type & FLOW_EXT) {
7089                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7090                 fs->m_ext.vlan_tci =
7091                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7092                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7093
7094                 hclge_fd_get_user_def_info(fs, rule);
7095         }
7096
7097         if (fs->flow_type & FLOW_MAC_EXT) {
7098                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7099                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
7100                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
7101                 else
7102                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
7103                                         rule->tuples_mask.dst_mac);
7104         }
7105 }
7106
7107 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7108                                   struct ethtool_rxnfc *cmd)
7109 {
7110         struct hclge_vport *vport = hclge_get_vport(handle);
7111         struct hclge_fd_rule *rule = NULL;
7112         struct hclge_dev *hdev = vport->back;
7113         struct ethtool_rx_flow_spec *fs;
7114         struct hlist_node *node2;
7115
7116         if (!hnae3_dev_fd_supported(hdev))
7117                 return -EOPNOTSUPP;
7118
7119         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7120
7121         spin_lock_bh(&hdev->fd_rule_lock);
7122
7123         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7124                 if (rule->location >= fs->location)
7125                         break;
7126         }
7127
7128         if (!rule || fs->location != rule->location) {
7129                 spin_unlock_bh(&hdev->fd_rule_lock);
7130
7131                 return -ENOENT;
7132         }
7133
7134         fs->flow_type = rule->flow_type;
7135         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7136         case SCTP_V4_FLOW:
7137         case TCP_V4_FLOW:
7138         case UDP_V4_FLOW:
7139                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7140                                          &fs->m_u.tcp_ip4_spec);
7141                 break;
7142         case IP_USER_FLOW:
7143                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7144                                       &fs->m_u.usr_ip4_spec);
7145                 break;
7146         case SCTP_V6_FLOW:
7147         case TCP_V6_FLOW:
7148         case UDP_V6_FLOW:
7149                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7150                                          &fs->m_u.tcp_ip6_spec);
7151                 break;
7152         case IPV6_USER_FLOW:
7153                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7154                                       &fs->m_u.usr_ip6_spec);
7155                 break;
7156         /* The flow type of the fd rule has been checked before it was added
7157          * to the rule list. As the other flow types have been handled above,
7158          * the default case must be ETHER_FLOW.
7159          */
7160         default:
7161                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7162                                         &fs->m_u.ether_spec);
7163                 break;
7164         }
7165
7166         hclge_fd_get_ext_info(fs, rule);
7167
7168         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7169                 fs->ring_cookie = RX_CLS_FLOW_DISC;
7170         } else {
7171                 u64 vf_id;
7172
7173                 fs->ring_cookie = rule->queue_id;
7174                 vf_id = rule->vf_id;
7175                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7176                 fs->ring_cookie |= vf_id;
7177         }
7178
7179         spin_unlock_bh(&hdev->fd_rule_lock);
7180
7181         return 0;
7182 }
7183
7184 static int hclge_get_all_rules(struct hnae3_handle *handle,
7185                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
7186 {
7187         struct hclge_vport *vport = hclge_get_vport(handle);
7188         struct hclge_dev *hdev = vport->back;
7189         struct hclge_fd_rule *rule;
7190         struct hlist_node *node2;
7191         int cnt = 0;
7192
7193         if (!hnae3_dev_fd_supported(hdev))
7194                 return -EOPNOTSUPP;
7195
7196         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7197
7198         spin_lock_bh(&hdev->fd_rule_lock);
7199         hlist_for_each_entry_safe(rule, node2,
7200                                   &hdev->fd_rule_list, rule_node) {
7201                 if (cnt == cmd->rule_cnt) {
7202                         spin_unlock_bh(&hdev->fd_rule_lock);
7203                         return -EMSGSIZE;
7204                 }
7205
7206                 if (rule->state == HCLGE_FD_TO_DEL)
7207                         continue;
7208
7209                 rule_locs[cnt] = rule->location;
7210                 cnt++;
7211         }
7212
7213         spin_unlock_bh(&hdev->fd_rule_lock);
7214
7215         cmd->rule_cnt = cnt;
7216
7217         return 0;
7218 }
7219
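/* Extract the tuples used for aRFS matching from the flow keys dissected
 * by the stack. For IPv4 flows only the last word of the src_ip/dst_ip
 * arrays is used, matching how IPV4_INDEX is used elsewhere in this file.
 */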
7220 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7221                                      struct hclge_fd_rule_tuples *tuples)
7222 {
7223 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7224 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7225
7226         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7227         tuples->ip_proto = fkeys->basic.ip_proto;
7228         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7229
7230         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7231                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7232                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7233         } else {
7234                 int i;
7235
7236                 for (i = 0; i < IPV6_SIZE; i++) {
7237                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7238                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7239                 }
7240         }
7241 }
7242
7243 /* traverse all rules and check whether an existing rule has the same tuples */
7244 static struct hclge_fd_rule *
7245 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7246                           const struct hclge_fd_rule_tuples *tuples)
7247 {
7248         struct hclge_fd_rule *rule = NULL;
7249         struct hlist_node *node;
7250
7251         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7252                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7253                         return rule;
7254         }
7255
7256         return NULL;
7257 }
7258
7259 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7260                                      struct hclge_fd_rule *rule)
7261 {
7262         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7263                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7264                              BIT(INNER_SRC_PORT);
7265         rule->action = 0;
7266         rule->vf_id = 0;
7267         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7268         rule->state = HCLGE_FD_TO_ADD;
7269         if (tuples->ether_proto == ETH_P_IP) {
7270                 if (tuples->ip_proto == IPPROTO_TCP)
7271                         rule->flow_type = TCP_V4_FLOW;
7272                 else
7273                         rule->flow_type = UDP_V4_FLOW;
7274         } else {
7275                 if (tuples->ip_proto == IPPROTO_TCP)
7276                         rule->flow_type = TCP_V6_FLOW;
7277                 else
7278                         rule->flow_type = UDP_V6_FLOW;
7279         }
7280         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7281         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7282 }
7283
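/* Entry point for accelerated RFS, typically reached through the stack's
 * ndo_rx_flow_steer path. The rule location is returned as the filter id
 * that is later checked again in hclge_rfs_filter_expire().
 */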
7284 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7285                                       u16 flow_id, struct flow_keys *fkeys)
7286 {
7287         struct hclge_vport *vport = hclge_get_vport(handle);
7288         struct hclge_fd_rule_tuples new_tuples = {};
7289         struct hclge_dev *hdev = vport->back;
7290         struct hclge_fd_rule *rule;
7291         u16 bit_id;
7292
7293         if (!hnae3_dev_fd_supported(hdev))
7294                 return -EOPNOTSUPP;
7295
7296         /* when an fd rule added by the user already exists,
7297          * arfs should not work
7298          */
7299         spin_lock_bh(&hdev->fd_rule_lock);
7300         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7301             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7302                 spin_unlock_bh(&hdev->fd_rule_lock);
7303                 return -EOPNOTSUPP;
7304         }
7305
7306         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7307
7308         /* check whether a flow director filter already exists for this flow;
7309          * if not, create a new filter for it;
7310          * if a filter exists with a different queue id, modify the filter;
7311          * if a filter exists with the same queue id, do nothing
7312          */
7313         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7314         if (!rule) {
7315                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7316                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7317                         spin_unlock_bh(&hdev->fd_rule_lock);
7318                         return -ENOSPC;
7319                 }
7320
7321                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7322                 if (!rule) {
7323                         spin_unlock_bh(&hdev->fd_rule_lock);
7324                         return -ENOMEM;
7325                 }
7326
7327                 rule->location = bit_id;
7328                 rule->arfs.flow_id = flow_id;
7329                 rule->queue_id = queue_id;
7330                 hclge_fd_build_arfs_rule(&new_tuples, rule);
7331                 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7332                 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7333         } else if (rule->queue_id != queue_id) {
7334                 rule->queue_id = queue_id;
7335                 rule->state = HCLGE_FD_TO_ADD;
7336                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7337                 hclge_task_schedule(hdev, 0);
7338         }
7339         spin_unlock_bh(&hdev->fd_rule_lock);
7340         return rule->location;
7341 }
7342
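/* Periodically check the aRFS rules against rps_may_expire_flow() and mark
 * expired ones for deletion; the actual hardware update is left to
 * hclge_sync_fd_table().
 */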
7343 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7344 {
7345 #ifdef CONFIG_RFS_ACCEL
7346         struct hnae3_handle *handle = &hdev->vport[0].nic;
7347         struct hclge_fd_rule *rule;
7348         struct hlist_node *node;
7349
7350         spin_lock_bh(&hdev->fd_rule_lock);
7351         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7352                 spin_unlock_bh(&hdev->fd_rule_lock);
7353                 return;
7354         }
7355         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7356                 if (rule->state != HCLGE_FD_ACTIVE)
7357                         continue;
7358                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7359                                         rule->arfs.flow_id, rule->location)) {
7360                         rule->state = HCLGE_FD_TO_DEL;
7361                         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7362                 }
7363         }
7364         spin_unlock_bh(&hdev->fd_rule_lock);
7365 #endif
7366 }
7367
7368 /* must be called with fd_rule_lock held */
7369 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7370 {
7371 #ifdef CONFIG_RFS_ACCEL
7372         struct hclge_fd_rule *rule;
7373         struct hlist_node *node;
7374         int ret;
7375
7376         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7377                 return 0;
7378
7379         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7380                 switch (rule->state) {
7381                 case HCLGE_FD_TO_DEL:
7382                 case HCLGE_FD_ACTIVE:
7383                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7384                                                    rule->location, NULL, false);
7385                         if (ret)
7386                                 return ret;
7387                         fallthrough;
7388                 case HCLGE_FD_TO_ADD:
7389                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7390                         hlist_del(&rule->rule_node);
7391                         kfree(rule);
7392                         break;
7393                 default:
7394                         break;
7395                 }
7396         }
7397         hclge_sync_fd_state(hdev);
7398
7399 #endif
7400         return 0;
7401 }
7402
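/* The hclge_get_cls_key_*() helpers below translate tc flower match keys
 * (struct flow_rule) into the driver's rule tuples; any key the filter does
 * not supply is marked in unused_tuple so it is wildcarded in hardware.
 */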
7403 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7404                                     struct hclge_fd_rule *rule)
7405 {
7406         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7407                 struct flow_match_basic match;
7408                 u16 ethtype_key, ethtype_mask;
7409
7410                 flow_rule_match_basic(flow, &match);
7411                 ethtype_key = ntohs(match.key->n_proto);
7412                 ethtype_mask = ntohs(match.mask->n_proto);
7413
7414                 if (ethtype_key == ETH_P_ALL) {
7415                         ethtype_key = 0;
7416                         ethtype_mask = 0;
7417                 }
7418                 rule->tuples.ether_proto = ethtype_key;
7419                 rule->tuples_mask.ether_proto = ethtype_mask;
7420                 rule->tuples.ip_proto = match.key->ip_proto;
7421                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7422         } else {
7423                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7424                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7425         }
7426 }
7427
7428 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7429                                   struct hclge_fd_rule *rule)
7430 {
7431         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7432                 struct flow_match_eth_addrs match;
7433
7434                 flow_rule_match_eth_addrs(flow, &match);
7435                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7436                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7437                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7438                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7439         } else {
7440                 rule->unused_tuple |= BIT(INNER_DST_MAC);
7441                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7442         }
7443 }
7444
7445 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7446                                    struct hclge_fd_rule *rule)
7447 {
7448         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7449                 struct flow_match_vlan match;
7450
7451                 flow_rule_match_vlan(flow, &match);
7452                 rule->tuples.vlan_tag1 = match.key->vlan_id |
7453                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7454                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7455                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7456         } else {
7457                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7458         }
7459 }
7460
7461 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7462                                  struct hclge_fd_rule *rule)
7463 {
7464         u16 addr_type = 0;
7465
7466         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7467                 struct flow_match_control match;
7468
7469                 flow_rule_match_control(flow, &match);
7470                 addr_type = match.key->addr_type;
7471         }
7472
7473         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7474                 struct flow_match_ipv4_addrs match;
7475
7476                 flow_rule_match_ipv4_addrs(flow, &match);
7477                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7478                 rule->tuples_mask.src_ip[IPV4_INDEX] =
7479                                                 be32_to_cpu(match.mask->src);
7480                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7481                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7482                                                 be32_to_cpu(match.mask->dst);
7483         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7484                 struct flow_match_ipv6_addrs match;
7485
7486                 flow_rule_match_ipv6_addrs(flow, &match);
7487                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7488                                   IPV6_SIZE);
7489                 be32_to_cpu_array(rule->tuples_mask.src_ip,
7490                                   match.mask->src.s6_addr32, IPV6_SIZE);
7491                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7492                                   IPV6_SIZE);
7493                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7494                                   match.mask->dst.s6_addr32, IPV6_SIZE);
7495         } else {
7496                 rule->unused_tuple |= BIT(INNER_SRC_IP);
7497                 rule->unused_tuple |= BIT(INNER_DST_IP);
7498         }
7499 }
7500
7501 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7502                                    struct hclge_fd_rule *rule)
7503 {
7504         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7505                 struct flow_match_ports match;
7506
7507                 flow_rule_match_ports(flow, &match);
7508
7509                 rule->tuples.src_port = be16_to_cpu(match.key->src);
7510                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7511                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7512                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7513         } else {
7514                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7515                 rule->unused_tuple |= BIT(INNER_DST_PORT);
7516         }
7517 }
7518
7519 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7520                                   struct flow_cls_offload *cls_flower,
7521                                   struct hclge_fd_rule *rule)
7522 {
7523         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7524         struct flow_dissector *dissector = flow->match.dissector;
7525
7526         if (dissector->used_keys &
7527             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7528               BIT(FLOW_DISSECTOR_KEY_BASIC) |
7529               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7530               BIT(FLOW_DISSECTOR_KEY_VLAN) |
7531               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7532               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7533               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7534                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7535                         dissector->used_keys);
7536                 return -EOPNOTSUPP;
7537         }
7538
7539         hclge_get_cls_key_basic(flow, rule);
7540         hclge_get_cls_key_mac(flow, rule);
7541         hclge_get_cls_key_vlan(flow, rule);
7542         hclge_get_cls_key_ip(flow, rule);
7543         hclge_get_cls_key_port(flow, rule);
7544
7545         return 0;
7546 }
7547
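/* A tc flower filter's priority is mapped directly to a flow director
 * location (prio - 1), so it must lie within the stage-1 rule space and must
 * not collide with an already used location.
 */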
7548 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7549                                   struct flow_cls_offload *cls_flower, int tc)
7550 {
7551         u32 prio = cls_flower->common.prio;
7552
7553         if (tc < 0 || tc > hdev->tc_max) {
7554                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7555                 return -EINVAL;
7556         }
7557
7558         if (prio == 0 ||
7559             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7560                 dev_err(&hdev->pdev->dev,
7561                         "prio %u should be in range[1, %u]\n",
7562                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7563                 return -EINVAL;
7564         }
7565
7566         if (test_bit(prio - 1, hdev->fd_bmap)) {
7567                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7568                 return -EINVAL;
7569         }
7570         return 0;
7571 }
7572
7573 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7574                                 struct flow_cls_offload *cls_flower,
7575                                 int tc)
7576 {
7577         struct hclge_vport *vport = hclge_get_vport(handle);
7578         struct hclge_dev *hdev = vport->back;
7579         struct hclge_fd_rule *rule;
7580         int ret;
7581
7582         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7583         if (ret) {
7584                 dev_err(&hdev->pdev->dev,
7585                         "failed to check cls flower params, ret = %d\n", ret);
7586                 return ret;
7587         }
7588
7589         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7590         if (!rule)
7591                 return -ENOMEM;
7592
7593         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7594         if (ret) {
7595                 kfree(rule);
7596                 return ret;
7597         }
7598
7599         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7600         rule->cls_flower.tc = tc;
7601         rule->location = cls_flower->common.prio - 1;
7602         rule->vf_id = 0;
7603         rule->cls_flower.cookie = cls_flower->cookie;
7604         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7605
7606         ret = hclge_add_fd_entry_common(hdev, rule);
7607         if (ret)
7608                 kfree(rule);
7609
7610         return ret;
7611 }
7612
7613 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7614                                                    unsigned long cookie)
7615 {
7616         struct hclge_fd_rule *rule;
7617         struct hlist_node *node;
7618
7619         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7620                 if (rule->cls_flower.cookie == cookie)
7621                         return rule;
7622         }
7623
7624         return NULL;
7625 }
7626
7627 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7628                                 struct flow_cls_offload *cls_flower)
7629 {
7630         struct hclge_vport *vport = hclge_get_vport(handle);
7631         struct hclge_dev *hdev = vport->back;
7632         struct hclge_fd_rule *rule;
7633         int ret;
7634
7635         spin_lock_bh(&hdev->fd_rule_lock);
7636
7637         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7638         if (!rule) {
7639                 spin_unlock_bh(&hdev->fd_rule_lock);
7640                 return -EINVAL;
7641         }
7642
7643         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7644                                    NULL, false);
7645         if (ret) {
7646                 spin_unlock_bh(&hdev->fd_rule_lock);
7647                 return ret;
7648         }
7649
7650         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7651         spin_unlock_bh(&hdev->fd_rule_lock);
7652
7653         return 0;
7654 }
7655
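/* Walk the rule list and push pending changes to hardware: rules in
 * HCLGE_FD_TO_ADD state are programmed into the TCAM, rules in
 * HCLGE_FD_TO_DEL are removed and freed. On failure the FD_TBL_CHANGED bit
 * is set again so the service task retries later.
 */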
7656 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7657 {
7658         struct hclge_fd_rule *rule;
7659         struct hlist_node *node;
7660         int ret = 0;
7661
7662         if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7663                 return;
7664
7665         spin_lock_bh(&hdev->fd_rule_lock);
7666
7667         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7668                 switch (rule->state) {
7669                 case HCLGE_FD_TO_ADD:
7670                         ret = hclge_fd_config_rule(hdev, rule);
7671                         if (ret)
7672                                 goto out;
7673                         rule->state = HCLGE_FD_ACTIVE;
7674                         break;
7675                 case HCLGE_FD_TO_DEL:
7676                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7677                                                    rule->location, NULL, false);
7678                         if (ret)
7679                                 goto out;
7680                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7681                         hclge_fd_free_node(hdev, rule);
7682                         break;
7683                 default:
7684                         break;
7685                 }
7686         }
7687
7688 out:
7689         if (ret)
7690                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7691
7692         spin_unlock_bh(&hdev->fd_rule_lock);
7693 }
7694
7695 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7696 {
7697         if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7698                 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7699
7700                 hclge_clear_fd_rules_in_list(hdev, clear_list);
7701         }
7702
7703         hclge_sync_fd_user_def_cfg(hdev, false);
7704
7705         hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7706 }
7707
7708 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7709 {
7710         struct hclge_vport *vport = hclge_get_vport(handle);
7711         struct hclge_dev *hdev = vport->back;
7712
7713         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7714                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7715 }
7716
7717 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7718 {
7719         struct hclge_vport *vport = hclge_get_vport(handle);
7720         struct hclge_dev *hdev = vport->back;
7721
7722         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7723 }
7724
7725 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7726 {
7727         struct hclge_vport *vport = hclge_get_vport(handle);
7728         struct hclge_dev *hdev = vport->back;
7729
7730         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7731 }
7732
7733 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7734 {
7735         struct hclge_vport *vport = hclge_get_vport(handle);
7736         struct hclge_dev *hdev = vport->back;
7737
7738         return hdev->rst_stats.hw_reset_done_cnt;
7739 }
7740
7741 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7742 {
7743         struct hclge_vport *vport = hclge_get_vport(handle);
7744         struct hclge_dev *hdev = vport->back;
7745
7746         hdev->fd_en = enable;
7747
7748         if (!enable)
7749                 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7750         else
7751                 hclge_restore_fd_entries(handle);
7752
7753         hclge_task_schedule(hdev, 0);
7754 }
7755
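/* Enable or disable the MAC. When enabling, TX/RX as well as the related
 * padding, FCS and oversize/undersize handling are switched on through the
 * CONFIG_MAC_MODE command.
 */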
7756 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7757 {
7758         struct hclge_desc desc;
7759         struct hclge_config_mac_mode_cmd *req =
7760                 (struct hclge_config_mac_mode_cmd *)desc.data;
7761         u32 loop_en = 0;
7762         int ret;
7763
7764         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7765
7766         if (enable) {
7767                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7768                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7769                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7770                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7771                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7772                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7773                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7774                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7775                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7776                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7777         }
7778
7779         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7780
7781         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7782         if (ret)
7783                 dev_err(&hdev->pdev->dev,
7784                         "mac enable fail, ret =%d.\n", ret);
7785 }
7786
7787 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7788                                      u8 switch_param, u8 param_mask)
7789 {
7790         struct hclge_mac_vlan_switch_cmd *req;
7791         struct hclge_desc desc;
7792         u32 func_id;
7793         int ret;
7794
7795         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7796         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7797
7798         /* read current config parameter */
7799         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7800                                    true);
7801         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7802         req->func_id = cpu_to_le32(func_id);
7803
7804         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7805         if (ret) {
7806                 dev_err(&hdev->pdev->dev,
7807                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7808                 return ret;
7809         }
7810
7811         /* modify and write new config parameter */
7812         hclge_cmd_reuse_desc(&desc, false);
7813         req->switch_param = (req->switch_param & param_mask) | switch_param;
7814         req->param_mask = param_mask;
7815
7816         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7817         if (ret)
7818                 dev_err(&hdev->pdev->dev,
7819                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7820         return ret;
7821 }
7822
7823 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7824                                        int link_ret)
7825 {
7826 #define HCLGE_PHY_LINK_STATUS_NUM  200
7827
7828         struct phy_device *phydev = hdev->hw.mac.phydev;
7829         int i = 0;
7830         int ret;
7831
7832         do {
7833                 ret = phy_read_status(phydev);
7834                 if (ret) {
7835                         dev_err(&hdev->pdev->dev,
7836                                 "phy update link status fail, ret = %d\n", ret);
7837                         return;
7838                 }
7839
7840                 if (phydev->link == link_ret)
7841                         break;
7842
7843                 msleep(HCLGE_LINK_STATUS_MS);
7844         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7845 }
7846
7847 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7848 {
7849 #define HCLGE_MAC_LINK_STATUS_NUM  100
7850
7851         int link_status;
7852         int i = 0;
7853         int ret;
7854
7855         do {
7856                 ret = hclge_get_mac_link_status(hdev, &link_status);
7857                 if (ret)
7858                         return ret;
7859                 if (link_status == link_ret)
7860                         return 0;
7861
7862                 msleep(HCLGE_LINK_STATUS_MS);
7863         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7864         return -EBUSY;
7865 }
7866
7867 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7868                                           bool is_phy)
7869 {
7870         int link_ret;
7871
7872         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7873
7874         if (is_phy)
7875                 hclge_phy_link_status_wait(hdev, link_ret);
7876
7877         return hclge_mac_link_status_wait(hdev, link_ret);
7878 }
7879
7880 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7881 {
7882         struct hclge_config_mac_mode_cmd *req;
7883         struct hclge_desc desc;
7884         u32 loop_en;
7885         int ret;
7886
7887         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7888         /* 1 Read out the MAC mode config first */
7889         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7890         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7891         if (ret) {
7892                 dev_err(&hdev->pdev->dev,
7893                         "mac loopback get fail, ret =%d.\n", ret);
7894                 return ret;
7895         }
7896
7897         /* 2 Then setup the loopback flag */
7898         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7899         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7900
7901         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7902
7903         /* 3 Config mac work mode with the loopback flag
7904          * and its original configuration parameters
7905          */
7906         hclge_cmd_reuse_desc(&desc, false);
7907         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7908         if (ret)
7909                 dev_err(&hdev->pdev->dev,
7910                         "mac loopback set fail, ret =%d.\n", ret);
7911         return ret;
7912 }
7913
7914 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7915                                      enum hnae3_loop loop_mode)
7916 {
7917 #define HCLGE_COMMON_LB_RETRY_MS        10
7918 #define HCLGE_COMMON_LB_RETRY_NUM       100
7919
7920         struct hclge_common_lb_cmd *req;
7921         struct hclge_desc desc;
7922         int ret, i = 0;
7923         u8 loop_mode_b;
7924
7925         req = (struct hclge_common_lb_cmd *)desc.data;
7926         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7927
7928         switch (loop_mode) {
7929         case HNAE3_LOOP_SERIAL_SERDES:
7930                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7931                 break;
7932         case HNAE3_LOOP_PARALLEL_SERDES:
7933                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7934                 break;
7935         case HNAE3_LOOP_PHY:
7936                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7937                 break;
7938         default:
7939                 dev_err(&hdev->pdev->dev,
7940                         "unsupported common loopback mode %d\n", loop_mode);
7941                 return -ENOTSUPP;
7942         }
7943
7944         if (en) {
7945                 req->enable = loop_mode_b;
7946                 req->mask = loop_mode_b;
7947         } else {
7948                 req->mask = loop_mode_b;
7949         }
7950
7951         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7952         if (ret) {
7953                 dev_err(&hdev->pdev->dev,
7954                         "common loopback set fail, ret = %d\n", ret);
7955                 return ret;
7956         }
7957
7958         do {
7959                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7960                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7961                                            true);
7962                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7963                 if (ret) {
7964                         dev_err(&hdev->pdev->dev,
7965                                 "common loopback get, ret = %d\n", ret);
7966                         return ret;
7967                 }
7968         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7969                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7970
7971         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7972                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7973                 return -EBUSY;
7974         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7975                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7976                 return -EIO;
7977         }
7978         return ret;
7979 }
7980
7981 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7982                                      enum hnae3_loop loop_mode)
7983 {
7984         int ret;
7985
7986         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7987         if (ret)
7988                 return ret;
7989
7990         hclge_cfg_mac_mode(hdev, en);
7991
7992         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7993         if (ret)
7994                 dev_err(&hdev->pdev->dev,
7995                         "serdes loopback config mac mode timeout\n");
7996
7997         return ret;
7998 }
7999
8000 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
8001                                      struct phy_device *phydev)
8002 {
8003         int ret;
8004
8005         if (!phydev->suspended) {
8006                 ret = phy_suspend(phydev);
8007                 if (ret)
8008                         return ret;
8009         }
8010
8011         ret = phy_resume(phydev);
8012         if (ret)
8013                 return ret;
8014
8015         return phy_loopback(phydev, true);
8016 }
8017
8018 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
8019                                       struct phy_device *phydev)
8020 {
8021         int ret;
8022
8023         ret = phy_loopback(phydev, false);
8024         if (ret)
8025                 return ret;
8026
8027         return phy_suspend(phydev);
8028 }
8029
8030 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
8031 {
8032         struct phy_device *phydev = hdev->hw.mac.phydev;
8033         int ret;
8034
8035         if (!phydev) {
8036                 if (hnae3_dev_phy_imp_supported(hdev))
8037                         return hclge_set_common_loopback(hdev, en,
8038                                                          HNAE3_LOOP_PHY);
8039                 return -ENOTSUPP;
8040         }
8041
8042         if (en)
8043                 ret = hclge_enable_phy_loopback(hdev, phydev);
8044         else
8045                 ret = hclge_disable_phy_loopback(hdev, phydev);
8046         if (ret) {
8047                 dev_err(&hdev->pdev->dev,
8048                         "set phy loopback fail, ret = %d\n", ret);
8049                 return ret;
8050         }
8051
8052         hclge_cfg_mac_mode(hdev, en);
8053
8054         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
8055         if (ret)
8056                 dev_err(&hdev->pdev->dev,
8057                         "phy loopback config mac mode timeout\n");
8058
8059         return ret;
8060 }
8061
8062 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
8063                                      u16 stream_id, bool enable)
8064 {
8065         struct hclge_desc desc;
8066         struct hclge_cfg_com_tqp_queue_cmd *req =
8067                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
8068
8069         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
8070         req->tqp_id = cpu_to_le16(tqp_id);
8071         req->stream_id = cpu_to_le16(stream_id);
8072         if (enable)
8073                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
8074
8075         return hclge_cmd_send(&hdev->hw, &desc, 1);
8076 }
8077
8078 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
8079 {
8080         struct hclge_vport *vport = hclge_get_vport(handle);
8081         struct hclge_dev *hdev = vport->back;
8082         int ret;
8083         u16 i;
8084
8085         for (i = 0; i < handle->kinfo.num_tqps; i++) {
8086                 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8087                 if (ret)
8088                         return ret;
8089         }
8090         return 0;
8091 }
8092
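/* Dispatch the requested loopback mode (app/MAC, serdes or phy) and enable
 * or disable the TQPs accordingly. This is typically exercised through the
 * ethtool self-test path, e.g. "ethtool -t eth0" (illustration only).
 */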
8093 static int hclge_set_loopback(struct hnae3_handle *handle,
8094                               enum hnae3_loop loop_mode, bool en)
8095 {
8096         struct hclge_vport *vport = hclge_get_vport(handle);
8097         struct hclge_dev *hdev = vport->back;
8098         int ret;
8099
8100         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8101          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8102          * the same, the packets are looped back in the SSU. If SSU loopback
8103          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8104          */
8105         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8106                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8107
8108                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8109                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
8110                 if (ret)
8111                         return ret;
8112         }
8113
8114         switch (loop_mode) {
8115         case HNAE3_LOOP_APP:
8116                 ret = hclge_set_app_loopback(hdev, en);
8117                 break;
8118         case HNAE3_LOOP_SERIAL_SERDES:
8119         case HNAE3_LOOP_PARALLEL_SERDES:
8120                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8121                 break;
8122         case HNAE3_LOOP_PHY:
8123                 ret = hclge_set_phy_loopback(hdev, en);
8124                 break;
8125         default:
8126                 ret = -ENOTSUPP;
8127                 dev_err(&hdev->pdev->dev,
8128                         "loop_mode %d is not supported\n", loop_mode);
8129                 break;
8130         }
8131
8132         if (ret)
8133                 return ret;
8134
8135         ret = hclge_tqp_enable(handle, en);
8136         if (ret)
8137                 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8138                         en ? "enable" : "disable", ret);
8139
8140         return ret;
8141 }
8142
8143 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8144 {
8145         int ret;
8146
8147         ret = hclge_set_app_loopback(hdev, false);
8148         if (ret)
8149                 return ret;
8150
8151         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8152         if (ret)
8153                 return ret;
8154
8155         return hclge_cfg_common_loopback(hdev, false,
8156                                          HNAE3_LOOP_PARALLEL_SERDES);
8157 }
8158
8159 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8160 {
8161         struct hclge_vport *vport = hclge_get_vport(handle);
8162         struct hnae3_knic_private_info *kinfo;
8163         struct hnae3_queue *queue;
8164         struct hclge_tqp *tqp;
8165         int i;
8166
8167         kinfo = &vport->nic.kinfo;
8168         for (i = 0; i < kinfo->num_tqps; i++) {
8169                 queue = handle->kinfo.tqp[i];
8170                 tqp = container_of(queue, struct hclge_tqp, q);
8171                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8172         }
8173 }
8174
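/* Busy-wait (bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations) until an
 * in-flight link update in the service task finishes, or until the service
 * task is seen to have made progress.
 */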
8175 static void hclge_flush_link_update(struct hclge_dev *hdev)
8176 {
8177 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
8178
8179         unsigned long last = hdev->serv_processed_cnt;
8180         int i = 0;
8181
8182         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8183                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8184                last == hdev->serv_processed_cnt)
8185                 usleep_range(1, 1);
8186 }
8187
8188 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8189 {
8190         struct hclge_vport *vport = hclge_get_vport(handle);
8191         struct hclge_dev *hdev = vport->back;
8192
8193         if (enable) {
8194                 hclge_task_schedule(hdev, 0);
8195         } else {
8196                 /* Set the DOWN flag here to disable link updating */
8197                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8198
8199                 /* flush memory to make sure DOWN is seen by service task */
8200                 smp_mb__before_atomic();
8201                 hclge_flush_link_update(hdev);
8202         }
8203 }
8204
8205 static int hclge_ae_start(struct hnae3_handle *handle)
8206 {
8207         struct hclge_vport *vport = hclge_get_vport(handle);
8208         struct hclge_dev *hdev = vport->back;
8209
8210         /* mac enable */
8211         hclge_cfg_mac_mode(hdev, true);
8212         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8213         hdev->hw.mac.link = 0;
8214
8215         /* reset tqp stats */
8216         hclge_reset_tqp_stats(handle);
8217
8218         hclge_mac_start_phy(hdev);
8219
8220         return 0;
8221 }
8222
8223 static void hclge_ae_stop(struct hnae3_handle *handle)
8224 {
8225         struct hclge_vport *vport = hclge_get_vport(handle);
8226         struct hclge_dev *hdev = vport->back;
8227
8228         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8229         spin_lock_bh(&hdev->fd_rule_lock);
8230         hclge_clear_arfs_rules(hdev);
8231         spin_unlock_bh(&hdev->fd_rule_lock);
8232
8233         /* If it is not a PF reset or FLR, the firmware will disable the
8234          * MAC, so we only need to stop the PHY here.
8235          */
8236         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8237             hdev->reset_type != HNAE3_FUNC_RESET &&
8238             hdev->reset_type != HNAE3_FLR_RESET) {
8239                 hclge_mac_stop_phy(hdev);
8240                 hclge_update_link_status(hdev);
8241                 return;
8242         }
8243
8244         hclge_reset_tqp(handle);
8245
8246         hclge_config_mac_tnl_int(hdev, false);
8247
8248         /* Mac disable */
8249         hclge_cfg_mac_mode(hdev, false);
8250
8251         hclge_mac_stop_phy(hdev);
8252
8253         /* reset tqp stats */
8254         hclge_reset_tqp_stats(handle);
8255         hclge_update_link_status(hdev);
8256 }
8257
8258 int hclge_vport_start(struct hclge_vport *vport)
8259 {
8260         struct hclge_dev *hdev = vport->back;
8261
8262         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8263         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8264         vport->last_active_jiffies = jiffies;
8265
8266         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8267                 if (vport->vport_id) {
8268                         hclge_restore_mac_table_common(vport);
8269                         hclge_restore_vport_vlan_table(vport);
8270                 } else {
8271                         hclge_restore_hw_table(hdev);
8272                 }
8273         }
8274
8275         clear_bit(vport->vport_id, hdev->vport_config_block);
8276
8277         return 0;
8278 }
8279
8280 void hclge_vport_stop(struct hclge_vport *vport)
8281 {
8282         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8283 }
8284
8285 static int hclge_client_start(struct hnae3_handle *handle)
8286 {
8287         struct hclge_vport *vport = hclge_get_vport(handle);
8288
8289         return hclge_vport_start(vport);
8290 }
8291
8292 static void hclge_client_stop(struct hnae3_handle *handle)
8293 {
8294         struct hclge_vport *vport = hclge_get_vport(handle);
8295
8296         hclge_vport_stop(vport);
8297 }
8298
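/* Translate the raw command-queue status and the per-operation response code
 * into an errno. For ADD, resp_code 0 or 1 means success and the overflow
 * codes map to -ENOSPC; for REMOVE and LKUP, resp_code 0 means success and 1
 * means the entry was not found (-ENOENT); anything else is reported as -EIO.
 */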
8299 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8300                                          u16 cmdq_resp, u8  resp_code,
8301                                          enum hclge_mac_vlan_tbl_opcode op)
8302 {
8303         struct hclge_dev *hdev = vport->back;
8304
8305         if (cmdq_resp) {
8306                 dev_err(&hdev->pdev->dev,
8307                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8308                         cmdq_resp);
8309                 return -EIO;
8310         }
8311
8312         if (op == HCLGE_MAC_VLAN_ADD) {
8313                 if (!resp_code || resp_code == 1)
8314                         return 0;
8315                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8316                          resp_code == HCLGE_ADD_MC_OVERFLOW)
8317                         return -ENOSPC;
8318
8319                 dev_err(&hdev->pdev->dev,
8320                         "add mac addr failed for undefined, code=%u.\n",
8321                         resp_code);
8322                 return -EIO;
8323         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8324                 if (!resp_code) {
8325                         return 0;
8326                 } else if (resp_code == 1) {
8327                         dev_dbg(&hdev->pdev->dev,
8328                                 "remove mac addr failed for miss.\n");
8329                         return -ENOENT;
8330                 }
8331
8332                 dev_err(&hdev->pdev->dev,
8333                         "remove mac addr failed for undefined, code=%u.\n",
8334                         resp_code);
8335                 return -EIO;
8336         } else if (op == HCLGE_MAC_VLAN_LKUP) {
8337                 if (!resp_code) {
8338                         return 0;
8339                 } else if (resp_code == 1) {
8340                         dev_dbg(&hdev->pdev->dev,
8341                                 "lookup mac addr failed for miss.\n");
8342                         return -ENOENT;
8343                 }
8344
8345                 dev_err(&hdev->pdev->dev,
8346                         "lookup mac addr failed for undefined, code=%u.\n",
8347                         resp_code);
8348                 return -EIO;
8349         }
8350
8351         dev_err(&hdev->pdev->dev,
8352                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8353
8354         return -EINVAL;
8355 }
8356
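/* Set or clear the bit for @vfid in the VF bitmap carried by the descriptor
 * chain: vfids 0..191 live in desc[1] and the remaining vfids in desc[2],
 * with word_num = vfid / 32 and bit_num = vfid % 32 inside the selected
 * descriptor. For example, vfid 200 lands in desc[2], data word 0, bit 8.
 */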
8357 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8358 {
8359 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8360
8361         unsigned int word_num;
8362         unsigned int bit_num;
8363
8364         if (vfid > 255 || vfid < 0)
8365                 return -EIO;
8366
8367         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8368                 word_num = vfid / 32;
8369                 bit_num  = vfid % 32;
8370                 if (clr)
8371                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8372                 else
8373                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8374         } else {
8375                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8376                 bit_num  = vfid % 32;
8377                 if (clr)
8378                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8379                 else
8380                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8381         }
8382
8383         return 0;
8384 }
8385
8386 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8387 {
8388 #define HCLGE_DESC_NUMBER 3
8389 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8390         int i, j;
8391
8392         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8393                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8394                         if (desc[i].data[j])
8395                                 return false;
8396
8397         return true;
8398 }
8399
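/* Pack the 6-byte MAC address into the layout used by the MAC_VLAN table
 * entry: bytes 0..3 go into mac_addr_hi32 (byte 0 in the lowest bits) and
 * bytes 4..5 into mac_addr_lo16. For example, 00:11:22:33:44:55 becomes
 * hi32 = 0x33221100 and lo16 = 0x5544 before the cpu_to_le conversion.
 */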
8400 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8401                                    const u8 *addr, bool is_mc)
8402 {
8403         const unsigned char *mac_addr = addr;
8404         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8405                        (mac_addr[0]) | (mac_addr[1] << 8);
8406         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8407
8408         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8409         if (is_mc) {
8410                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8411                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8412         }
8413
8414         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8415         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8416 }
8417
8418 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8419                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8420 {
8421         struct hclge_dev *hdev = vport->back;
8422         struct hclge_desc desc;
8423         u8 resp_code;
8424         u16 retval;
8425         int ret;
8426
8427         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8428
8429         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8430
8431         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8432         if (ret) {
8433                 dev_err(&hdev->pdev->dev,
8434                         "del mac addr failed for cmd_send, ret =%d.\n",
8435                         ret);
8436                 return ret;
8437         }
8438         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8439         retval = le16_to_cpu(desc.retval);
8440
8441         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8442                                              HCLGE_MAC_VLAN_REMOVE);
8443 }
8444
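/* Look up a MAC_VLAN table entry. Multicast entries are read with a chain of
 * three descriptors linked by HCLGE_CMD_FLAG_NEXT, apparently because the
 * per-VF bitmap of a multicast entry spans desc[1] and desc[2] (see
 * hclge_update_desc_vfid()); unicast entries fit in a single descriptor.
 */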
8445 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8446                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8447                                      struct hclge_desc *desc,
8448                                      bool is_mc)
8449 {
8450         struct hclge_dev *hdev = vport->back;
8451         u8 resp_code;
8452         u16 retval;
8453         int ret;
8454
8455         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8456         if (is_mc) {
8457                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8458                 memcpy(desc[0].data,
8459                        req,
8460                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8461                 hclge_cmd_setup_basic_desc(&desc[1],
8462                                            HCLGE_OPC_MAC_VLAN_ADD,
8463                                            true);
8464                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8465                 hclge_cmd_setup_basic_desc(&desc[2],
8466                                            HCLGE_OPC_MAC_VLAN_ADD,
8467                                            true);
8468                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8469         } else {
8470                 memcpy(desc[0].data,
8471                        req,
8472                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8473                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8474         }
8475         if (ret) {
8476                 dev_err(&hdev->pdev->dev,
8477                         "lookup mac addr failed for cmd_send, ret =%d.\n",
8478                         ret);
8479                 return ret;
8480         }
8481         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8482         retval = le16_to_cpu(desc[0].retval);
8483
8484         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8485                                              HCLGE_MAC_VLAN_LKUP);
8486 }
8487
8488 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8489                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8490                                   struct hclge_desc *mc_desc)
8491 {
8492         struct hclge_dev *hdev = vport->back;
8493         int cfg_status;
8494         u8 resp_code;
8495         u16 retval;
8496         int ret;
8497
8498         if (!mc_desc) {
8499                 struct hclge_desc desc;
8500
8501                 hclge_cmd_setup_basic_desc(&desc,
8502                                            HCLGE_OPC_MAC_VLAN_ADD,
8503                                            false);
8504                 memcpy(desc.data, req,
8505                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8506                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8507                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8508                 retval = le16_to_cpu(desc.retval);
8509
8510                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8511                                                            resp_code,
8512                                                            HCLGE_MAC_VLAN_ADD);
8513         } else {
8514                 hclge_cmd_reuse_desc(&mc_desc[0], false);
8515                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8516                 hclge_cmd_reuse_desc(&mc_desc[1], false);
8517                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8518                 hclge_cmd_reuse_desc(&mc_desc[2], false);
8519                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8520                 memcpy(mc_desc[0].data, req,
8521                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8522                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8523                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8524                 retval = le16_to_cpu(mc_desc[0].retval);
8525
8526                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8527                                                            resp_code,
8528                                                            HCLGE_MAC_VLAN_ADD);
8529         }
8530
8531         if (ret) {
8532                 dev_err(&hdev->pdev->dev,
8533                         "add mac addr failed for cmd_send, ret =%d.\n",
8534                         ret);
8535                 return ret;
8536         }
8537
8538         return cfg_status;
8539 }
8540
8541 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8542                                u16 *allocated_size)
8543 {
8544         struct hclge_umv_spc_alc_cmd *req;
8545         struct hclge_desc desc;
8546         int ret;
8547
8548         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8549         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8550
8551         req->space_size = cpu_to_le32(space_size);
8552
8553         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8554         if (ret) {
8555                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8556                         ret);
8557                 return ret;
8558         }
8559
8560         *allocated_size = le32_to_cpu(desc.data[1]);
8561
8562         return 0;
8563 }
8564
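/* Request the wanted unicast MAC (UMV) space from the firmware and split
 * whatever was actually granted: each vport gets a private quota of
 * max_umv_size / (num_alloc_vport + 1) entries, and the shared pool is
 * initialised to one such quota plus the division remainder.
 */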
8565 static int hclge_init_umv_space(struct hclge_dev *hdev)
8566 {
8567         u16 allocated_size = 0;
8568         int ret;
8569
8570         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8571         if (ret)
8572                 return ret;
8573
8574         if (allocated_size < hdev->wanted_umv_size)
8575                 dev_warn(&hdev->pdev->dev,
8576                          "failed to alloc umv space, want %u, get %u\n",
8577                          hdev->wanted_umv_size, allocated_size);
8578
8579         hdev->max_umv_size = allocated_size;
8580         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8581         hdev->share_umv_size = hdev->priv_umv_size +
8582                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8583
8584         if (hdev->ae_dev->dev_specs.mc_mac_size)
8585                 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8586
8587         return 0;
8588 }
8589
8590 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8591 {
8592         struct hclge_vport *vport;
8593         int i;
8594
8595         for (i = 0; i < hdev->num_alloc_vport; i++) {
8596                 vport = &hdev->vport[i];
8597                 vport->used_umv_num = 0;
8598         }
8599
8600         mutex_lock(&hdev->vport_lock);
8601         hdev->share_umv_size = hdev->priv_umv_size +
8602                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8603         mutex_unlock(&hdev->vport_lock);
8604
8605         hdev->used_mc_mac_num = 0;
8606 }
8607
8608 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8609 {
8610         struct hclge_dev *hdev = vport->back;
8611         bool is_full;
8612
8613         if (need_lock)
8614                 mutex_lock(&hdev->vport_lock);
8615
8616         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8617                    hdev->share_umv_size == 0);
8618
8619         if (need_lock)
8620                 mutex_unlock(&hdev->vport_lock);
8621
8622         return is_full;
8623 }
8624
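/* Account one unicast entry against the UMV space: entries beyond the
 * vport's private quota consume (or, when freeing, give back) the shared
 * pool. Callers such as hclge_add_uc_addr_common() and
 * hclge_rm_uc_addr_common() hold hdev->vport_lock around this update.
 */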
8625 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8626 {
8627         struct hclge_dev *hdev = vport->back;
8628
8629         if (is_free) {
8630                 if (vport->used_umv_num > hdev->priv_umv_size)
8631                         hdev->share_umv_size++;
8632
8633                 if (vport->used_umv_num > 0)
8634                         vport->used_umv_num--;
8635         } else {
8636                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8637                     hdev->share_umv_size > 0)
8638                         hdev->share_umv_size--;
8639                 vport->used_umv_num++;
8640         }
8641 }
8642
8643 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8644                                                   const u8 *mac_addr)
8645 {
8646         struct hclge_mac_node *mac_node, *tmp;
8647
8648         list_for_each_entry_safe(mac_node, tmp, list, node)
8649                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8650                         return mac_node;
8651
8652         return NULL;
8653 }
8654
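/* Merge a new request into an existing mac node. The resulting state machine
 * is: TO_ADD on top of TO_DEL cancels the pending delete (back to ACTIVE);
 * TO_DEL on top of TO_ADD drops the node entirely, since the address was
 * never written to hardware; ACTIVE on top of TO_ADD marks the address as
 * programmed after a successful sync.
 */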
8655 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8656                                   enum HCLGE_MAC_NODE_STATE state)
8657 {
8658         switch (state) {
8659         /* from set_rx_mode or tmp_add_list */
8660         case HCLGE_MAC_TO_ADD:
8661                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8662                         mac_node->state = HCLGE_MAC_ACTIVE;
8663                 break;
8664         /* only from set_rx_mode */
8665         case HCLGE_MAC_TO_DEL:
8666                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8667                         list_del(&mac_node->node);
8668                         kfree(mac_node);
8669                 } else {
8670                         mac_node->state = HCLGE_MAC_TO_DEL;
8671                 }
8672                 break;
8673         /* only from tmp_add_list, the mac_node->state won't be
8674          * ACTIVE.
8675          */
8676         case HCLGE_MAC_ACTIVE:
8677                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8678                         mac_node->state = HCLGE_MAC_ACTIVE;
8679
8680                 break;
8681         }
8682 }
8683
8684 int hclge_update_mac_list(struct hclge_vport *vport,
8685                           enum HCLGE_MAC_NODE_STATE state,
8686                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8687                           const unsigned char *addr)
8688 {
8689         struct hclge_dev *hdev = vport->back;
8690         struct hclge_mac_node *mac_node;
8691         struct list_head *list;
8692
8693         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8694                 &vport->uc_mac_list : &vport->mc_mac_list;
8695
8696         spin_lock_bh(&vport->mac_list_lock);
8697
8698         /* if the mac addr is already in the mac list, there is no need
8699          * to add a new node for it; just check its state and either
8700          * convert it to a new state, remove it, or do nothing.
8701          */
8702         mac_node = hclge_find_mac_node(list, addr);
8703         if (mac_node) {
8704                 hclge_update_mac_node(mac_node, state);
8705                 spin_unlock_bh(&vport->mac_list_lock);
8706                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8707                 return 0;
8708         }
8709
8710         /* if this address was never added, there is nothing to delete */
8711         if (state == HCLGE_MAC_TO_DEL) {
8712                 spin_unlock_bh(&vport->mac_list_lock);
8713                 dev_err(&hdev->pdev->dev,
8714                         "failed to delete address %pM from mac list\n",
8715                         addr);
8716                 return -ENOENT;
8717         }
8718
8719         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8720         if (!mac_node) {
8721                 spin_unlock_bh(&vport->mac_list_lock);
8722                 return -ENOMEM;
8723         }
8724
8725         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8726
8727         mac_node->state = state;
8728         ether_addr_copy(mac_node->mac_addr, addr);
8729         list_add_tail(&mac_node->node, list);
8730
8731         spin_unlock_bh(&vport->mac_list_lock);
8732
8733         return 0;
8734 }
8735
8736 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8737                              const unsigned char *addr)
8738 {
8739         struct hclge_vport *vport = hclge_get_vport(handle);
8740
8741         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8742                                      addr);
8743 }
8744
8745 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8746                              const unsigned char *addr)
8747 {
8748         struct hclge_dev *hdev = vport->back;
8749         struct hclge_mac_vlan_tbl_entry_cmd req;
8750         struct hclge_desc desc;
8751         u16 egress_port = 0;
8752         int ret;
8753
8754         /* mac addr check */
8755         if (is_zero_ether_addr(addr) ||
8756             is_broadcast_ether_addr(addr) ||
8757             is_multicast_ether_addr(addr)) {
8758                 dev_err(&hdev->pdev->dev,
8759                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8760                          addr, is_zero_ether_addr(addr),
8761                          is_broadcast_ether_addr(addr),
8762                          is_multicast_ether_addr(addr));
8763                 return -EINVAL;
8764         }
8765
8766         memset(&req, 0, sizeof(req));
8767
8768         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8769                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8770
8771         req.egress_port = cpu_to_le16(egress_port);
8772
8773         hclge_prepare_mac_addr(&req, addr, false);
8774
8775         /* Look up the mac address in the mac_vlan table, and add
8776          * it if the entry does not exist. Duplicate unicast entries
8777          * are not allowed in the mac vlan table.
8778          */
8779         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8780         if (ret == -ENOENT) {
8781                 mutex_lock(&hdev->vport_lock);
8782                 if (!hclge_is_umv_space_full(vport, false)) {
8783                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8784                         if (!ret)
8785                                 hclge_update_umv_space(vport, false);
8786                         mutex_unlock(&hdev->vport_lock);
8787                         return ret;
8788                 }
8789                 mutex_unlock(&hdev->vport_lock);
8790
8791                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8792                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8793                                 hdev->priv_umv_size);
8794
8795                 return -ENOSPC;
8796         }
8797
8798         /* check if we just hit the duplicate */
8799         if (!ret)
8800                 return -EEXIST;
8801
8802         return ret;
8803 }
8804
8805 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8806                             const unsigned char *addr)
8807 {
8808         struct hclge_vport *vport = hclge_get_vport(handle);
8809
8810         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8811                                      addr);
8812 }
8813
8814 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8815                             const unsigned char *addr)
8816 {
8817         struct hclge_dev *hdev = vport->back;
8818         struct hclge_mac_vlan_tbl_entry_cmd req;
8819         int ret;
8820
8821         /* mac addr check */
8822         if (is_zero_ether_addr(addr) ||
8823             is_broadcast_ether_addr(addr) ||
8824             is_multicast_ether_addr(addr)) {
8825                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8826                         addr);
8827                 return -EINVAL;
8828         }
8829
8830         memset(&req, 0, sizeof(req));
8831         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8832         hclge_prepare_mac_addr(&req, addr, false);
8833         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8834         if (!ret) {
8835                 mutex_lock(&hdev->vport_lock);
8836                 hclge_update_umv_space(vport, true);
8837                 mutex_unlock(&hdev->vport_lock);
8838         } else if (ret == -ENOENT) {
8839                 ret = 0;
8840         }
8841
8842         return ret;
8843 }
8844
8845 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8846                              const unsigned char *addr)
8847 {
8848         struct hclge_vport *vport = hclge_get_vport(handle);
8849
8850         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8851                                      addr);
8852 }
8853
8854 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8855                              const unsigned char *addr)
8856 {
8857         struct hclge_dev *hdev = vport->back;
8858         struct hclge_mac_vlan_tbl_entry_cmd req;
8859         struct hclge_desc desc[3];
8860         bool is_new_addr = false;
8861         int status;
8862
8863         /* mac addr check */
8864         if (!is_multicast_ether_addr(addr)) {
8865                 dev_err(&hdev->pdev->dev,
8866                         "Add mc mac err! invalid mac:%pM.\n",
8867                          addr);
8868                 return -EINVAL;
8869         }
8870         memset(&req, 0, sizeof(req));
8871         hclge_prepare_mac_addr(&req, addr, true);
8872         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8873         if (status) {
8874                 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8875                     hdev->used_mc_mac_num >=
8876                     hdev->ae_dev->dev_specs.mc_mac_size)
8877                         goto err_no_space;
8878
8879                 is_new_addr = true;
8880
8881                 /* This mac addr does not exist, add a new entry for it */
8882                 memset(desc[0].data, 0, sizeof(desc[0].data));
8883                 memset(desc[1].data, 0, sizeof(desc[0].data));
8884                 memset(desc[2].data, 0, sizeof(desc[0].data));
8885         }
8886         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8887         if (status)
8888                 return status;
8889         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8890         if (status == -ENOSPC)
8891                 goto err_no_space;
8892         else if (!status && is_new_addr)
8893                 hdev->used_mc_mac_num++;
8894
8895         return status;
8896
8897 err_no_space:
8898         /* if already overflowed, do not print the error each time */
8899         if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8900                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8901         return -ENOSPC;
8902 }
8903
8904 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8905                             const unsigned char *addr)
8906 {
8907         struct hclge_vport *vport = hclge_get_vport(handle);
8908
8909         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8910                                      addr);
8911 }
8912
8913 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8914                             const unsigned char *addr)
8915 {
8916         struct hclge_dev *hdev = vport->back;
8917         struct hclge_mac_vlan_tbl_entry_cmd req;
8918         struct hclge_desc desc[3];
8919         int status;
8920
8921         /* mac addr check */
8922         if (!is_multicast_ether_addr(addr)) {
8923                 dev_dbg(&hdev->pdev->dev,
8924                         "Remove mc mac err! invalid mac:%pM.\n",
8925                          addr);
8926                 return -EINVAL;
8927         }
8928
8929         memset(&req, 0, sizeof(req));
8930         hclge_prepare_mac_addr(&req, addr, true);
8931         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8932         if (!status) {
8933                 /* This mac addr exists, remove this handle's VFID for it */
8934                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8935                 if (status)
8936                         return status;
8937
8938                 if (hclge_is_all_function_id_zero(desc)) {
8939                         /* All the vfids are zero, so delete this entry */
8940                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8941                         if (!status)
8942                                 hdev->used_mc_mac_num--;
8943                 } else {
8944                         /* Not all the vfids are zero, so update the entry */
8945                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8946                 }
8947         } else if (status == -ENOENT) {
8948                 status = 0;
8949         }
8950
8951         return status;
8952 }
8953
8954 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8955                                       struct list_head *list,
8956                                       int (*sync)(struct hclge_vport *,
8957                                                   const unsigned char *))
8958 {
8959         struct hclge_mac_node *mac_node, *tmp;
8960         int ret;
8961
8962         list_for_each_entry_safe(mac_node, tmp, list, node) {
8963                 ret = sync(vport, mac_node->mac_addr);
8964                 if (!ret) {
8965                         mac_node->state = HCLGE_MAC_ACTIVE;
8966                 } else {
8967                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8968                                 &vport->state);
8969
8970                         /* If one unicast mac address already exists in
8971                          * hardware, still try the other unicast addresses,
8972                          * since they may be new ones that can be added.
8973                          */
8974                         if (ret != -EEXIST)
8975                                 break;
8976                 }
8977         }
8978 }
8979
8980 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8981                                         struct list_head *list,
8982                                         int (*unsync)(struct hclge_vport *,
8983                                                       const unsigned char *))
8984 {
8985         struct hclge_mac_node *mac_node, *tmp;
8986         int ret;
8987
8988         list_for_each_entry_safe(mac_node, tmp, list, node) {
8989                 ret = unsync(vport, mac_node->mac_addr);
8990                 if (!ret || ret == -ENOENT) {
8991                         list_del(&mac_node->node);
8992                         kfree(mac_node);
8993                 } else {
8994                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8995                                 &vport->state);
8996                         break;
8997                 }
8998         }
8999 }
9000
9001 static bool hclge_sync_from_add_list(struct list_head *add_list,
9002                                      struct list_head *mac_list)
9003 {
9004         struct hclge_mac_node *mac_node, *tmp, *new_node;
9005         bool all_added = true;
9006
9007         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
9008                 if (mac_node->state == HCLGE_MAC_TO_ADD)
9009                         all_added = false;
9010
9011                 /* if the mac address from tmp_add_list is not in the
9012                  * uc/mc_mac_list, it means a TO_DEL request was received
9013                  * while the mac address was being added to the mac table.
9014                  * If the mac_node state is ACTIVE, change it to TO_DEL so
9015                  * it will be removed on the next sync. Otherwise it must
9016                  * be TO_ADD, i.e. this address has not been added to the
9017                  * mac table yet, so just remove the mac node.
9018                  */
9019                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9020                 if (new_node) {
9021                         hclge_update_mac_node(new_node, mac_node->state);
9022                         list_del(&mac_node->node);
9023                         kfree(mac_node);
9024                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
9025                         mac_node->state = HCLGE_MAC_TO_DEL;
9026                         list_move_tail(&mac_node->node, mac_list);
9027                 } else {
9028                         list_del(&mac_node->node);
9029                         kfree(mac_node);
9030                 }
9031         }
9032
9033         return all_added;
9034 }
9035
9036 static void hclge_sync_from_del_list(struct list_head *del_list,
9037                                      struct list_head *mac_list)
9038 {
9039         struct hclge_mac_node *mac_node, *tmp, *new_node;
9040
9041         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
9042                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
9043                 if (new_node) {
9044                         /* If the mac addr exists in the mac list, a new
9045                          * TO_ADD request was received while the address
9046                          * was being configured. The mac node state is
9047                          * TO_ADD and the address is still present in the
9048                          * hardware (because deleting it failed), so just
9049                          * change the mac node state to ACTIVE.
9050                          */
9051                         new_node->state = HCLGE_MAC_ACTIVE;
9052                         list_del(&mac_node->node);
9053                         kfree(mac_node);
9054                 } else {
9055                         list_move_tail(&mac_node->node, mac_list);
9056                 }
9057         }
9058 }
9059
9060 static void hclge_update_overflow_flags(struct hclge_vport *vport,
9061                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
9062                                         bool is_all_added)
9063 {
9064         if (mac_type == HCLGE_MAC_ADDR_UC) {
9065                 if (is_all_added)
9066                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
9067                 else
9068                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
9069         } else {
9070                 if (is_all_added)
9071                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
9072                 else
9073                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
9074         }
9075 }
9076
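/* Push the pending changes of one vport's uc/mc list to hardware in three
 * steps: snapshot the TO_DEL/TO_ADD nodes into temporary lists under
 * mac_list_lock, issue the actual table commands without holding the lock,
 * then merge the results back into the list and update the overflow promisc
 * flags depending on whether every address could be added.
 */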
9077 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
9078                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
9079 {
9080         struct hclge_mac_node *mac_node, *tmp, *new_node;
9081         struct list_head tmp_add_list, tmp_del_list;
9082         struct list_head *list;
9083         bool all_added;
9084
9085         INIT_LIST_HEAD(&tmp_add_list);
9086         INIT_LIST_HEAD(&tmp_del_list);
9087
9088         /* move the mac addrs to the tmp_add_list and tmp_del_list, so
9089          * that we can add/delete them outside the spin lock
9090          */
9091         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9092                 &vport->uc_mac_list : &vport->mc_mac_list;
9093
9094         spin_lock_bh(&vport->mac_list_lock);
9095
9096         list_for_each_entry_safe(mac_node, tmp, list, node) {
9097                 switch (mac_node->state) {
9098                 case HCLGE_MAC_TO_DEL:
9099                         list_move_tail(&mac_node->node, &tmp_del_list);
9100                         break;
9101                 case HCLGE_MAC_TO_ADD:
9102                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9103                         if (!new_node)
9104                                 goto stop_traverse;
9105                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9106                         new_node->state = mac_node->state;
9107                         list_add_tail(&new_node->node, &tmp_add_list);
9108                         break;
9109                 default:
9110                         break;
9111                 }
9112         }
9113
9114 stop_traverse:
9115         spin_unlock_bh(&vport->mac_list_lock);
9116
9117         /* delete first, in order to get max mac table space for adding */
9118         if (mac_type == HCLGE_MAC_ADDR_UC) {
9119                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9120                                             hclge_rm_uc_addr_common);
9121                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9122                                           hclge_add_uc_addr_common);
9123         } else {
9124                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9125                                             hclge_rm_mc_addr_common);
9126                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9127                                           hclge_add_mc_addr_common);
9128         }
9129
9130         /* if some mac addresses failed to be added/deleted, move them
9131          * back to the mac_list and retry on the next sync.
9132          */
9133         spin_lock_bh(&vport->mac_list_lock);
9134
9135         hclge_sync_from_del_list(&tmp_del_list, list);
9136         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9137
9138         spin_unlock_bh(&vport->mac_list_lock);
9139
9140         hclge_update_overflow_flags(vport, mac_type, all_added);
9141 }
9142
9143 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9144 {
9145         struct hclge_dev *hdev = vport->back;
9146
9147         if (test_bit(vport->vport_id, hdev->vport_config_block))
9148                 return false;
9149
9150         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9151                 return true;
9152
9153         return false;
9154 }
9155
9156 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9157 {
9158         int i;
9159
9160         for (i = 0; i < hdev->num_alloc_vport; i++) {
9161                 struct hclge_vport *vport = &hdev->vport[i];
9162
9163                 if (!hclge_need_sync_mac_table(vport))
9164                         continue;
9165
9166                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9167                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9168         }
9169 }
9170
9171 static void hclge_build_del_list(struct list_head *list,
9172                                  bool is_del_list,
9173                                  struct list_head *tmp_del_list)
9174 {
9175         struct hclge_mac_node *mac_cfg, *tmp;
9176
9177         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9178                 switch (mac_cfg->state) {
9179                 case HCLGE_MAC_TO_DEL:
9180                 case HCLGE_MAC_ACTIVE:
9181                         list_move_tail(&mac_cfg->node, tmp_del_list);
9182                         break;
9183                 case HCLGE_MAC_TO_ADD:
9184                         if (is_del_list) {
9185                                 list_del(&mac_cfg->node);
9186                                 kfree(mac_cfg);
9187                         }
9188                         break;
9189                 }
9190         }
9191 }
9192
9193 static void hclge_unsync_del_list(struct hclge_vport *vport,
9194                                   int (*unsync)(struct hclge_vport *vport,
9195                                                 const unsigned char *addr),
9196                                   bool is_del_list,
9197                                   struct list_head *tmp_del_list)
9198 {
9199         struct hclge_mac_node *mac_cfg, *tmp;
9200         int ret;
9201
9202         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9203                 ret = unsync(vport, mac_cfg->mac_addr);
9204                 if (!ret || ret == -ENOENT) {
9205                         /* clear all mac addrs from hardware, but keep
9206                          * them in the mac list so they can be restored
9207                          * after the vf reset finishes.
9208                          */
9209                         if (!is_del_list &&
9210                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
9211                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
9212                         } else {
9213                                 list_del(&mac_cfg->node);
9214                                 kfree(mac_cfg);
9215                         }
9216                 } else if (is_del_list) {
9217                         mac_cfg->state = HCLGE_MAC_TO_DEL;
9218                 }
9219         }
9220 }
9221
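/* Remove all addresses of one type from hardware for a vport. With
 * @is_del_list set, the list entries are dropped as well; otherwise (e.g.
 * around a VF reset) the entries are kept and marked TO_ADD so they can be
 * restored later, and the vport is flagged in vport_config_block to suppress
 * syncing in the meantime.
 */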
9222 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9223                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
9224 {
9225         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9226         struct hclge_dev *hdev = vport->back;
9227         struct list_head tmp_del_list, *list;
9228
9229         if (mac_type == HCLGE_MAC_ADDR_UC) {
9230                 list = &vport->uc_mac_list;
9231                 unsync = hclge_rm_uc_addr_common;
9232         } else {
9233                 list = &vport->mc_mac_list;
9234                 unsync = hclge_rm_mc_addr_common;
9235         }
9236
9237         INIT_LIST_HEAD(&tmp_del_list);
9238
9239         if (!is_del_list)
9240                 set_bit(vport->vport_id, hdev->vport_config_block);
9241
9242         spin_lock_bh(&vport->mac_list_lock);
9243
9244         hclge_build_del_list(list, is_del_list, &tmp_del_list);
9245
9246         spin_unlock_bh(&vport->mac_list_lock);
9247
9248         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9249
9250         spin_lock_bh(&vport->mac_list_lock);
9251
9252         hclge_sync_from_del_list(&tmp_del_list, list);
9253
9254         spin_unlock_bh(&vport->mac_list_lock);
9255 }
9256
9257 /* remove all mac addresses when uninitializing */
9258 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9259                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
9260 {
9261         struct hclge_mac_node *mac_node, *tmp;
9262         struct hclge_dev *hdev = vport->back;
9263         struct list_head tmp_del_list, *list;
9264
9265         INIT_LIST_HEAD(&tmp_del_list);
9266
9267         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9268                 &vport->uc_mac_list : &vport->mc_mac_list;
9269
9270         spin_lock_bh(&vport->mac_list_lock);
9271
9272         list_for_each_entry_safe(mac_node, tmp, list, node) {
9273                 switch (mac_node->state) {
9274                 case HCLGE_MAC_TO_DEL:
9275                 case HCLGE_MAC_ACTIVE:
9276                         list_move_tail(&mac_node->node, &tmp_del_list);
9277                         break;
9278                 case HCLGE_MAC_TO_ADD:
9279                         list_del(&mac_node->node);
9280                         kfree(mac_node);
9281                         break;
9282                 }
9283         }
9284
9285         spin_unlock_bh(&vport->mac_list_lock);
9286
9287         if (mac_type == HCLGE_MAC_ADDR_UC)
9288                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9289                                             hclge_rm_uc_addr_common);
9290         else
9291                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9292                                             hclge_rm_mc_addr_common);
9293
9294         if (!list_empty(&tmp_del_list))
9295                 dev_warn(&hdev->pdev->dev,
9296                          "uninit %s mac list for vport %u not completely.\n",
9297                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9298                          vport->vport_id);
9299
9300         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9301                 list_del(&mac_node->node);
9302                 kfree(mac_node);
9303         }
9304 }
9305
9306 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9307 {
9308         struct hclge_vport *vport;
9309         int i;
9310
9311         for (i = 0; i < hdev->num_alloc_vport; i++) {
9312                 vport = &hdev->vport[i];
9313                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9314                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9315         }
9316 }
9317
9318 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9319                                               u16 cmdq_resp, u8 resp_code)
9320 {
9321 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
9322 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
9323 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
9324 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
9325
9326         int return_status;
9327
9328         if (cmdq_resp) {
9329                 dev_err(&hdev->pdev->dev,
9330                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9331                         cmdq_resp);
9332                 return -EIO;
9333         }
9334
9335         switch (resp_code) {
9336         case HCLGE_ETHERTYPE_SUCCESS_ADD:
9337         case HCLGE_ETHERTYPE_ALREADY_ADD:
9338                 return_status = 0;
9339                 break;
9340         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9341                 dev_err(&hdev->pdev->dev,
9342                         "add mac ethertype failed for manager table overflow.\n");
9343                 return_status = -EIO;
9344                 break;
9345         case HCLGE_ETHERTYPE_KEY_CONFLICT:
9346                 dev_err(&hdev->pdev->dev,
9347                         "add mac ethertype failed for key conflict.\n");
9348                 return_status = -EIO;
9349                 break;
9350         default:
9351                 dev_err(&hdev->pdev->dev,
9352                         "add mac ethertype failed for undefined, code=%u.\n",
9353                         resp_code);
9354                 return_status = -EIO;
9355         }
9356
9357         return return_status;
9358 }
9359
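/* A requested VF MAC is treated as already existing if it is programmed in
 * the MAC_VLAN table or if it matches the MAC configured for any other VF.
 */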
9360 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9361                                      u8 *mac_addr)
9362 {
9363         struct hclge_mac_vlan_tbl_entry_cmd req;
9364         struct hclge_dev *hdev = vport->back;
9365         struct hclge_desc desc;
9366         u16 egress_port = 0;
9367         int i;
9368
9369         if (is_zero_ether_addr(mac_addr))
9370                 return false;
9371
9372         memset(&req, 0, sizeof(req));
9373         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9374                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9375         req.egress_port = cpu_to_le16(egress_port);
9376         hclge_prepare_mac_addr(&req, mac_addr, false);
9377
9378         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9379                 return true;
9380
9381         vf_idx += HCLGE_VF_VPORT_START_NUM;
9382         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9383                 if (i != vf_idx &&
9384                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9385                         return true;
9386
9387         return false;
9388 }
9389
9390 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9391                             u8 *mac_addr)
9392 {
9393         struct hclge_vport *vport = hclge_get_vport(handle);
9394         struct hclge_dev *hdev = vport->back;
9395
9396         vport = hclge_get_vf_vport(hdev, vf);
9397         if (!vport)
9398                 return -EINVAL;
9399
9400         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9401                 dev_info(&hdev->pdev->dev,
9402                          "Specified MAC(=%pM) is same as before, no change committed!\n",
9403                          mac_addr);
9404                 return 0;
9405         }
9406
9407         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9408                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9409                         mac_addr);
9410                 return -EEXIST;
9411         }
9412
9413         ether_addr_copy(vport->vf_info.mac, mac_addr);
9414
9415         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9416                 dev_info(&hdev->pdev->dev,
9417                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9418                          vf, mac_addr);
9419                 return hclge_inform_reset_assert_to_vf(vport);
9420         }
9421
9422         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9423                  vf, mac_addr);
9424         return 0;
9425 }
9426
9427 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9428                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
9429 {
9430         struct hclge_desc desc;
9431         u8 resp_code;
9432         u16 retval;
9433         int ret;
9434
9435         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9436         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9437
9438         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9439         if (ret) {
9440                 dev_err(&hdev->pdev->dev,
9441                         "add mac ethertype failed for cmd_send, ret =%d.\n",
9442                         ret);
9443                 return ret;
9444         }
9445
9446         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9447         retval = le16_to_cpu(desc.retval);
9448
9449         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9450 }
9451
9452 static int init_mgr_tbl(struct hclge_dev *hdev)
9453 {
9454         int ret;
9455         int i;
9456
9457         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9458                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9459                 if (ret) {
9460                         dev_err(&hdev->pdev->dev,
9461                                 "add mac ethertype failed, ret =%d.\n",
9462                                 ret);
9463                         return ret;
9464                 }
9465         }
9466
9467         return 0;
9468 }
9469
9470 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9471 {
9472         struct hclge_vport *vport = hclge_get_vport(handle);
9473         struct hclge_dev *hdev = vport->back;
9474
9475         ether_addr_copy(p, hdev->hw.mac.mac_addr);
9476 }
9477
9478 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9479                                        const u8 *old_addr, const u8 *new_addr)
9480 {
9481         struct list_head *list = &vport->uc_mac_list;
9482         struct hclge_mac_node *old_node, *new_node;
9483
9484         new_node = hclge_find_mac_node(list, new_addr);
9485         if (!new_node) {
9486                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9487                 if (!new_node)
9488                         return -ENOMEM;
9489
9490                 new_node->state = HCLGE_MAC_TO_ADD;
9491                 ether_addr_copy(new_node->mac_addr, new_addr);
9492                 list_add(&new_node->node, list);
9493         } else {
9494                 if (new_node->state == HCLGE_MAC_TO_DEL)
9495                         new_node->state = HCLGE_MAC_ACTIVE;
9496
9497                 /* make sure the new addr is at the list head, otherwise
9498                  * the dev addr may not be re-added into the mac table due
9499                  * to the umv space limitation after a global/imp reset,
9500                  * which clears the mac table in hardware.
9501                  */
9502                 list_move(&new_node->node, list);
9503         }
9504
9505         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9506                 old_node = hclge_find_mac_node(list, old_addr);
9507                 if (old_node) {
9508                         if (old_node->state == HCLGE_MAC_TO_ADD) {
9509                                 list_del(&old_node->node);
9510                                 kfree(old_node);
9511                         } else {
9512                                 old_node->state = HCLGE_MAC_TO_DEL;
9513                         }
9514                 }
9515         }
9516
9517         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9518
9519         return 0;
9520 }
9521
9522 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9523                               bool is_first)
9524 {
9525         const unsigned char *new_addr = (const unsigned char *)p;
9526         struct hclge_vport *vport = hclge_get_vport(handle);
9527         struct hclge_dev *hdev = vport->back;
9528         unsigned char *old_addr = NULL;
9529         int ret;
9530
9531         /* mac addr check */
9532         if (is_zero_ether_addr(new_addr) ||
9533             is_broadcast_ether_addr(new_addr) ||
9534             is_multicast_ether_addr(new_addr)) {
9535                 dev_err(&hdev->pdev->dev,
9536                         "change uc mac err! invalid mac: %pM.\n",
9537                          new_addr);
9538                 return -EINVAL;
9539         }
9540
9541         ret = hclge_pause_addr_cfg(hdev, new_addr);
9542         if (ret) {
9543                 dev_err(&hdev->pdev->dev,
9544                         "failed to configure mac pause address, ret = %d\n",
9545                         ret);
9546                 return ret;
9547         }
9548
9549         if (!is_first)
9550                 old_addr = hdev->hw.mac.mac_addr;
9551
9552         spin_lock_bh(&vport->mac_list_lock);
9553         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9554         if (ret) {
9555                 dev_err(&hdev->pdev->dev,
9556                         "failed to change the mac addr:%pM, ret = %d\n",
9557                         new_addr, ret);
9558                 spin_unlock_bh(&vport->mac_list_lock);
9559
9560                 if (!is_first)
9561                         hclge_pause_addr_cfg(hdev, old_addr);
9562
9563                 return ret;
9564         }
9565         /* we must update the dev addr under the spin lock to prevent it
9566          * from being removed by the set_rx_mode path.
9567          */
9568         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9569         spin_unlock_bh(&vport->mac_list_lock);
9570
9571         hclge_task_schedule(hdev, 0);
9572
9573         return 0;
9574 }
9575
9576 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9577 {
9578         struct mii_ioctl_data *data = if_mii(ifr);
9579
9580         if (!hnae3_dev_phy_imp_supported(hdev))
9581                 return -EOPNOTSUPP;
9582
9583         switch (cmd) {
9584         case SIOCGMIIPHY:
9585                 data->phy_id = hdev->hw.mac.phy_addr;
9586                 /* this command reads phy id and register at the same time */
9587                 fallthrough;
9588         case SIOCGMIIREG:
9589                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9590                 return 0;
9591
9592         case SIOCSMIIREG:
9593                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9594         default:
9595                 return -EOPNOTSUPP;
9596         }
9597 }
9598
9599 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9600                           int cmd)
9601 {
9602         struct hclge_vport *vport = hclge_get_vport(handle);
9603         struct hclge_dev *hdev = vport->back;
9604
9605         switch (cmd) {
9606         case SIOCGHWTSTAMP:
9607                 return hclge_ptp_get_cfg(hdev, ifr);
9608         case SIOCSHWTSTAMP:
9609                 return hclge_ptp_set_cfg(hdev, ifr);
9610         default:
9611                 if (!hdev->hw.mac.phydev)
9612                         return hclge_mii_ioctl(hdev, ifr, cmd);
9613         }
9614
9615         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9616 }
9617
9618 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9619                                              bool bypass_en)
9620 {
9621         struct hclge_port_vlan_filter_bypass_cmd *req;
9622         struct hclge_desc desc;
9623         int ret;
9624
9625         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9626         req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9627         req->vf_id = vf_id;
9628         hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9629                       bypass_en ? 1 : 0);
9630
9631         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9632         if (ret)
9633                 dev_err(&hdev->pdev->dev,
9634                         "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9635                         vf_id, ret);
9636
9637         return ret;
9638 }
9639
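/* Read the current VLAN filter control configuration from firmware, set or
 * clear the requested filter enable bits (fe_type) and write it back.
 */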
9640 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9641                                       u8 fe_type, bool filter_en, u8 vf_id)
9642 {
9643         struct hclge_vlan_filter_ctrl_cmd *req;
9644         struct hclge_desc desc;
9645         int ret;
9646
9647         /* read current vlan filter parameter */
9648         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9649         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9650         req->vlan_type = vlan_type;
9651         req->vf_id = vf_id;
9652
9653         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9654         if (ret) {
9655                 dev_err(&hdev->pdev->dev,
9656                         "failed to get vlan filter config, ret = %d.\n", ret);
9657                 return ret;
9658         }
9659
9660         /* modify and write new config parameter */
9661         hclge_cmd_reuse_desc(&desc, false);
9662         req->vlan_fe = filter_en ?
9663                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9664
9665         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9666         if (ret)
9667                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9668                         ret);
9669
9670         return ret;
9671 }
9672
9673 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9674 {
9675         struct hclge_dev *hdev = vport->back;
9676         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9677         int ret;
9678
9679         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9680                 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9681                                                   HCLGE_FILTER_FE_EGRESS_V1_B,
9682                                                   enable, vport->vport_id);
9683
9684         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9685                                          HCLGE_FILTER_FE_EGRESS, enable,
9686                                          vport->vport_id);
9687         if (ret)
9688                 return ret;
9689
9690         if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9691                 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9692                                                         !enable);
9693         } else if (!vport->vport_id) {
9694                 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9695                         enable = false;
9696
9697                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9698                                                  HCLGE_FILTER_FE_INGRESS,
9699                                                  enable, 0);
9700         }
9701
9702         return ret;
9703 }
9704
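/* Work out whether the VLAN filter should currently be enabled for this
 * vport, based on the port based VLAN state, promiscuous settings, the
 * requested filter state and whether any non-zero VLAN is in use.
 */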
9705 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9706 {
9707         struct hnae3_handle *handle = &vport->nic;
9708         struct hclge_vport_vlan_cfg *vlan, *tmp;
9709         struct hclge_dev *hdev = vport->back;
9710
9711         if (vport->vport_id) {
9712                 if (vport->port_base_vlan_cfg.state !=
9713                         HNAE3_PORT_BASE_VLAN_DISABLE)
9714                         return true;
9715
9716                 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9717                         return false;
9718         } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9719                 return false;
9720         }
9721
9722         if (!vport->req_vlan_fltr_en)
9723                 return false;
9724
9725         /* for compatibility with older devices, always enable the vlan filter */
9726         if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9727                 return true;
9728
9729         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9730                 if (vlan->vlan_id != 0)
9731                         return true;
9732
9733         return false;
9734 }
9735
9736 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9737 {
9738         struct hclge_dev *hdev = vport->back;
9739         bool need_en;
9740         int ret;
9741
9742         mutex_lock(&hdev->vport_lock);
9743
9744         vport->req_vlan_fltr_en = request_en;
9745
9746         need_en = hclge_need_enable_vport_vlan_filter(vport);
9747         if (need_en == vport->cur_vlan_fltr_en) {
9748                 mutex_unlock(&hdev->vport_lock);
9749                 return 0;
9750         }
9751
9752         ret = hclge_set_vport_vlan_filter(vport, need_en);
9753         if (ret) {
9754                 mutex_unlock(&hdev->vport_lock);
9755                 return ret;
9756         }
9757
9758         vport->cur_vlan_fltr_en = need_en;
9759
9760         mutex_unlock(&hdev->vport_lock);
9761
9762         return 0;
9763 }
9764
9765 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9766 {
9767         struct hclge_vport *vport = hclge_get_vport(handle);
9768
9769         return hclge_enable_vport_vlan_filter(vport, enable);
9770 }
9771
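/* Build and send the two-descriptor HCLGE_OPC_VLAN_FILTER_VF_CFG command that
 * adds (is_kill false) or removes (is_kill true) a VLAN for one vf, selected
 * by its bit in the vf bitmap spread across the two descriptors.
 */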
9772 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9773                                         bool is_kill, u16 vlan,
9774                                         struct hclge_desc *desc)
9775 {
9776         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9777         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9778         u8 vf_byte_val;
9779         u8 vf_byte_off;
9780         int ret;
9781
9782         hclge_cmd_setup_basic_desc(&desc[0],
9783                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9784         hclge_cmd_setup_basic_desc(&desc[1],
9785                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9786
9787         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9788
9789         vf_byte_off = vfid / 8;
9790         vf_byte_val = 1 << (vfid % 8);
9791
9792         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9793         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9794
9795         req0->vlan_id  = cpu_to_le16(vlan);
9796         req0->vlan_cfg = is_kill;
9797
9798         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9799                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9800         else
9801                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9802
9803         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9804         if (ret) {
9805                 dev_err(&hdev->pdev->dev,
9806                         "Send vf vlan command fail, ret =%d.\n",
9807                         ret);
9808                 return ret;
9809         }
9810
9811         return 0;
9812 }
9813
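/* Interpret the response code of the vf VLAN filter command: "vf vlan table
 * full" on add and "entry not found" on delete are treated as non-fatal,
 * other failures are reported and return -EIO.
 */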
9814 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9815                                           bool is_kill, struct hclge_desc *desc)
9816 {
9817         struct hclge_vlan_filter_vf_cfg_cmd *req;
9818
9819         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9820
9821         if (!is_kill) {
9822 #define HCLGE_VF_VLAN_NO_ENTRY  2
9823                 if (!req->resp_code || req->resp_code == 1)
9824                         return 0;
9825
9826                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9827                         set_bit(vfid, hdev->vf_vlan_full);
9828                         dev_warn(&hdev->pdev->dev,
9829                                  "vf vlan table is full, vf vlan filter is disabled\n");
9830                         return 0;
9831                 }
9832
9833                 dev_err(&hdev->pdev->dev,
9834                         "Add vf vlan filter fail, ret =%u.\n",
9835                         req->resp_code);
9836         } else {
9837 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9838                 if (!req->resp_code)
9839                         return 0;
9840
9841                 /* vf vlan filter is disabled when the vf vlan table is
9842                  * full, so new vlan ids will not be added into the table.
9843                  * Just return 0 without a warning to avoid massive verbose
9844                  * print logs on unload.
9845                  */
9846                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9847                         return 0;
9848
9849                 dev_err(&hdev->pdev->dev,
9850                         "Kill vf vlan filter fail, ret =%u.\n",
9851                         req->resp_code);
9852         }
9853
9854         return -EIO;
9855 }
9856
9857 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9858                                     bool is_kill, u16 vlan)
9859 {
9860         struct hclge_vport *vport = &hdev->vport[vfid];
9861         struct hclge_desc desc[2];
9862         int ret;
9863
9864         /* if the vf vlan table is full, firmware closes the vf vlan filter,
9865          * so it is neither possible nor necessary to add a new vlan id.
9866          * If spoof check is enabled and the vf vlan table is full, adding a
9867          * new vlan must be rejected, as tx packets with it would be dropped.
9868          */
9869         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9870                 if (vport->vf_info.spoofchk && vlan) {
9871                         dev_err(&hdev->pdev->dev,
9872                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9873                         return -EPERM;
9874                 }
9875                 return 0;
9876         }
9877
9878         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9879         if (ret)
9880                 return ret;
9881
9882         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9883 }
9884
9885 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9886                                       u16 vlan_id, bool is_kill)
9887 {
9888         struct hclge_vlan_filter_pf_cfg_cmd *req;
9889         struct hclge_desc desc;
9890         u8 vlan_offset_byte_val;
9891         u8 vlan_offset_byte;
9892         u8 vlan_offset_160;
9893         int ret;
9894
9895         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9896
9897         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9898         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9899                            HCLGE_VLAN_BYTE_SIZE;
9900         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9901
9902         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9903         req->vlan_offset = vlan_offset_160;
9904         req->vlan_cfg = is_kill;
9905         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9906
9907         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9908         if (ret)
9909                 dev_err(&hdev->pdev->dev,
9910                         "port vlan command, send fail, ret =%d.\n", ret);
9911         return ret;
9912 }
9913
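/* Update the vf VLAN filter and the port VLAN filter for one vport. The port
 * level entry is only written when the first vport joins or the last vport
 * leaves the given VLAN.
 */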
9914 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9915                                     u16 vport_id, u16 vlan_id,
9916                                     bool is_kill)
9917 {
9918         u16 vport_idx, vport_num = 0;
9919         int ret;
9920
9921         if (is_kill && !vlan_id)
9922                 return 0;
9923
9924         if (vlan_id >= VLAN_N_VID)
9925                 return -EINVAL;
9926
9927         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9928         if (ret) {
9929                 dev_err(&hdev->pdev->dev,
9930                         "Set %u vport vlan filter config fail, ret =%d.\n",
9931                         vport_id, ret);
9932                 return ret;
9933         }
9934
9935         /* vlan 0 may be added twice when 8021q module is enabled */
9936         if (!is_kill && !vlan_id &&
9937             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9938                 return 0;
9939
9940         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9941                 dev_err(&hdev->pdev->dev,
9942                         "Add port vlan failed, vport %u is already in vlan %u\n",
9943                         vport_id, vlan_id);
9944                 return -EINVAL;
9945         }
9946
9947         if (is_kill &&
9948             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9949                 dev_err(&hdev->pdev->dev,
9950                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9951                         vport_id, vlan_id);
9952                 return -EINVAL;
9953         }
9954
9955         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9956                 vport_num++;
9957
9958         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9959                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9960                                                  is_kill);
9961
9962         return ret;
9963 }
9964
9965 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9966 {
9967         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9968         struct hclge_vport_vtag_tx_cfg_cmd *req;
9969         struct hclge_dev *hdev = vport->back;
9970         struct hclge_desc desc;
9971         u16 bmap_index;
9972         int status;
9973
9974         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9975
9976         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9977         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9978         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9979         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9980                       vcfg->accept_tag1 ? 1 : 0);
9981         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9982                       vcfg->accept_untag1 ? 1 : 0);
9983         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9984                       vcfg->accept_tag2 ? 1 : 0);
9985         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9986                       vcfg->accept_untag2 ? 1 : 0);
9987         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9988                       vcfg->insert_tag1_en ? 1 : 0);
9989         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9990                       vcfg->insert_tag2_en ? 1 : 0);
9991         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9992                       vcfg->tag_shift_mode_en ? 1 : 0);
9993         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9994
9995         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9996         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9997                         HCLGE_VF_NUM_PER_BYTE;
9998         req->vf_bitmap[bmap_index] =
9999                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10000
10001         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10002         if (status)
10003                 dev_err(&hdev->pdev->dev,
10004                         "Send port txvlan cfg command fail, ret =%d\n",
10005                         status);
10006
10007         return status;
10008 }
10009
10010 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
10011 {
10012         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
10013         struct hclge_vport_vtag_rx_cfg_cmd *req;
10014         struct hclge_dev *hdev = vport->back;
10015         struct hclge_desc desc;
10016         u16 bmap_index;
10017         int status;
10018
10019         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
10020
10021         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
10022         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
10023                       vcfg->strip_tag1_en ? 1 : 0);
10024         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
10025                       vcfg->strip_tag2_en ? 1 : 0);
10026         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
10027                       vcfg->vlan1_vlan_prionly ? 1 : 0);
10028         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
10029                       vcfg->vlan2_vlan_prionly ? 1 : 0);
10030         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
10031                       vcfg->strip_tag1_discard_en ? 1 : 0);
10032         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
10033                       vcfg->strip_tag2_discard_en ? 1 : 0);
10034
10035         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
10036         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
10037                         HCLGE_VF_NUM_PER_BYTE;
10038         req->vf_bitmap[bmap_index] =
10039                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
10040
10041         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10042         if (status)
10043                 dev_err(&hdev->pdev->dev,
10044                         "Send port rxvlan cfg command fail, ret =%d\n",
10045                         status);
10046
10047         return status;
10048 }
10049
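/* Set up the TX and RX VLAN tag offload configuration of a vport according to
 * its port based VLAN state, then write both configurations to hardware.
 */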
10050 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
10051                                   u16 port_base_vlan_state,
10052                                   u16 vlan_tag, u8 qos)
10053 {
10054         int ret;
10055
10056         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10057                 vport->txvlan_cfg.accept_tag1 = true;
10058                 vport->txvlan_cfg.insert_tag1_en = false;
10059                 vport->txvlan_cfg.default_tag1 = 0;
10060         } else {
10061                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
10062
10063                 vport->txvlan_cfg.accept_tag1 =
10064                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
10065                 vport->txvlan_cfg.insert_tag1_en = true;
10066                 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
10067                                                  vlan_tag;
10068         }
10069
10070         vport->txvlan_cfg.accept_untag1 = true;
10071
10072         /* accept_tag2 and accept_untag2 are not supported on
10073          * pdev revision(0x20); newer revisions support them, but
10074          * these two fields cannot be configured by the user.
10075          */
10076         vport->txvlan_cfg.accept_tag2 = true;
10077         vport->txvlan_cfg.accept_untag2 = true;
10078         vport->txvlan_cfg.insert_tag2_en = false;
10079         vport->txvlan_cfg.default_tag2 = 0;
10080         vport->txvlan_cfg.tag_shift_mode_en = true;
10081
10082         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10083                 vport->rxvlan_cfg.strip_tag1_en = false;
10084                 vport->rxvlan_cfg.strip_tag2_en =
10085                                 vport->rxvlan_cfg.rx_vlan_offload_en;
10086                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10087         } else {
10088                 vport->rxvlan_cfg.strip_tag1_en =
10089                                 vport->rxvlan_cfg.rx_vlan_offload_en;
10090                 vport->rxvlan_cfg.strip_tag2_en = true;
10091                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10092         }
10093
10094         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10095         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10096         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10097
10098         ret = hclge_set_vlan_tx_offload_cfg(vport);
10099         if (ret)
10100                 return ret;
10101
10102         return hclge_set_vlan_rx_offload_cfg(vport);
10103 }
10104
10105 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10106 {
10107         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10108         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10109         struct hclge_desc desc;
10110         int status;
10111
10112         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10113         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10114         rx_req->ot_fst_vlan_type =
10115                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10116         rx_req->ot_sec_vlan_type =
10117                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10118         rx_req->in_fst_vlan_type =
10119                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10120         rx_req->in_sec_vlan_type =
10121                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10122
10123         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10124         if (status) {
10125                 dev_err(&hdev->pdev->dev,
10126                         "Send rxvlan protocol type command fail, ret =%d\n",
10127                         status);
10128                 return status;
10129         }
10130
10131         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10132
10133         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10134         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10135         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10136
10137         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10138         if (status)
10139                 dev_err(&hdev->pdev->dev,
10140                         "Send txvlan protocol type command fail, ret =%d\n",
10141                         status);
10142
10143         return status;
10144 }
10145
10146 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10147 {
10148 #define HCLGE_DEF_VLAN_TYPE             0x8100
10149
10150         struct hnae3_handle *handle = &hdev->vport[0].nic;
10151         struct hclge_vport *vport;
10152         int ret;
10153         int i;
10154
10155         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10156                 /* for revision 0x21, vf vlan filter is per function */
10157                 for (i = 0; i < hdev->num_alloc_vport; i++) {
10158                         vport = &hdev->vport[i];
10159                         ret = hclge_set_vlan_filter_ctrl(hdev,
10160                                                          HCLGE_FILTER_TYPE_VF,
10161                                                          HCLGE_FILTER_FE_EGRESS,
10162                                                          true,
10163                                                          vport->vport_id);
10164                         if (ret)
10165                                 return ret;
10166                         vport->cur_vlan_fltr_en = true;
10167                 }
10168
10169                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10170                                                  HCLGE_FILTER_FE_INGRESS, true,
10171                                                  0);
10172                 if (ret)
10173                         return ret;
10174         } else {
10175                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10176                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
10177                                                  true, 0);
10178                 if (ret)
10179                         return ret;
10180         }
10181
10182         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10183         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10184         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10185         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10186         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10187         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10188
10189         ret = hclge_set_vlan_protocol_type(hdev);
10190         if (ret)
10191                 return ret;
10192
10193         for (i = 0; i < hdev->num_alloc_vport; i++) {
10194                 u16 vlan_tag;
10195                 u8 qos;
10196
10197                 vport = &hdev->vport[i];
10198                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10199                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10200
10201                 ret = hclge_vlan_offload_cfg(vport,
10202                                              vport->port_base_vlan_cfg.state,
10203                                              vlan_tag, qos);
10204                 if (ret)
10205                         return ret;
10206         }
10207
10208         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10209 }
10210
10211 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10212                                        bool writen_to_tbl)
10213 {
10214         struct hclge_vport_vlan_cfg *vlan, *tmp;
10215
10216         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10217                 if (vlan->vlan_id == vlan_id)
10218                         return;
10219
10220         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10221         if (!vlan)
10222                 return;
10223
10224         vlan->hd_tbl_status = writen_to_tbl;
10225         vlan->vlan_id = vlan_id;
10226
10227         list_add_tail(&vlan->node, &vport->vlan_list);
10228 }
10229
10230 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10231 {
10232         struct hclge_vport_vlan_cfg *vlan, *tmp;
10233         struct hclge_dev *hdev = vport->back;
10234         int ret;
10235
10236         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10237                 if (!vlan->hd_tbl_status) {
10238                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10239                                                        vport->vport_id,
10240                                                        vlan->vlan_id, false);
10241                         if (ret) {
10242                                 dev_err(&hdev->pdev->dev,
10243                                         "restore vport vlan list failed, ret=%d\n",
10244                                         ret);
10245                                 return ret;
10246                         }
10247                 }
10248                 vlan->hd_tbl_status = true;
10249         }
10250
10251         return 0;
10252 }
10253
10254 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10255                                       bool is_write_tbl)
10256 {
10257         struct hclge_vport_vlan_cfg *vlan, *tmp;
10258         struct hclge_dev *hdev = vport->back;
10259
10260         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10261                 if (vlan->vlan_id == vlan_id) {
10262                         if (is_write_tbl && vlan->hd_tbl_status)
10263                                 hclge_set_vlan_filter_hw(hdev,
10264                                                          htons(ETH_P_8021Q),
10265                                                          vport->vport_id,
10266                                                          vlan_id,
10267                                                          true);
10268
10269                         list_del(&vlan->node);
10270                         kfree(vlan);
10271                         break;
10272                 }
10273         }
10274 }
10275
10276 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10277 {
10278         struct hclge_vport_vlan_cfg *vlan, *tmp;
10279         struct hclge_dev *hdev = vport->back;
10280
10281         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10282                 if (vlan->hd_tbl_status)
10283                         hclge_set_vlan_filter_hw(hdev,
10284                                                  htons(ETH_P_8021Q),
10285                                                  vport->vport_id,
10286                                                  vlan->vlan_id,
10287                                                  true);
10288
10289                 vlan->hd_tbl_status = false;
10290                 if (is_del_list) {
10291                         list_del(&vlan->node);
10292                         kfree(vlan);
10293                 }
10294         }
10295         clear_bit(vport->vport_id, hdev->vf_vlan_full);
10296 }
10297
10298 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10299 {
10300         struct hclge_vport_vlan_cfg *vlan, *tmp;
10301         struct hclge_vport *vport;
10302         int i;
10303
10304         for (i = 0; i < hdev->num_alloc_vport; i++) {
10305                 vport = &hdev->vport[i];
10306                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10307                         list_del(&vlan->node);
10308                         kfree(vlan);
10309                 }
10310         }
10311 }
10312
10313 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10314 {
10315         struct hclge_vport_vlan_cfg *vlan, *tmp;
10316         struct hclge_dev *hdev = vport->back;
10317         u16 vlan_proto;
10318         u16 vlan_id;
10319         u16 state;
10320         int ret;
10321
10322         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10323         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10324         state = vport->port_base_vlan_cfg.state;
10325
10326         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10327                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10328                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10329                                          vport->vport_id, vlan_id,
10330                                          false);
10331                 return;
10332         }
10333
10334         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10335                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10336                                                vport->vport_id,
10337                                                vlan->vlan_id, false);
10338                 if (ret)
10339                         break;
10340                 vlan->hd_tbl_status = true;
10341         }
10342 }
10343
10344 /* For global reset and imp reset, hardware will clear the mac table,
10345  * so we change the mac address state from ACTIVE to TO_ADD, then they
10346  * can be restored in the service task after the reset completes.
10347  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need to
10348  * be restored after reset, so just remove these mac nodes from mac_list.
10349  */
10350 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10351 {
10352         struct hclge_mac_node *mac_node, *tmp;
10353
10354         list_for_each_entry_safe(mac_node, tmp, list, node) {
10355                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10356                         mac_node->state = HCLGE_MAC_TO_ADD;
10357                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10358                         list_del(&mac_node->node);
10359                         kfree(mac_node);
10360                 }
10361         }
10362 }
10363
10364 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10365 {
10366         spin_lock_bh(&vport->mac_list_lock);
10367
10368         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10369         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10370         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10371
10372         spin_unlock_bh(&vport->mac_list_lock);
10373 }
10374
10375 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10376 {
10377         struct hclge_vport *vport = &hdev->vport[0];
10378         struct hnae3_handle *handle = &vport->nic;
10379
10380         hclge_restore_mac_table_common(vport);
10381         hclge_restore_vport_vlan_table(vport);
10382         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10383         hclge_restore_fd_entries(handle);
10384 }
10385
10386 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10387 {
10388         struct hclge_vport *vport = hclge_get_vport(handle);
10389
10390         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10391                 vport->rxvlan_cfg.strip_tag1_en = false;
10392                 vport->rxvlan_cfg.strip_tag2_en = enable;
10393                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10394         } else {
10395                 vport->rxvlan_cfg.strip_tag1_en = enable;
10396                 vport->rxvlan_cfg.strip_tag2_en = true;
10397                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10398         }
10399
10400         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10401         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10402         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10403         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10404
10405         return hclge_set_vlan_rx_offload_cfg(vport);
10406 }
10407
10408 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10409 {
10410         struct hclge_dev *hdev = vport->back;
10411
10412         if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10413                 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10414 }
10415
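/* When port based VLAN is being enabled, drop the vport's VLAN filter entries
 * and install the new port VLAN; when it is being disabled, remove the old
 * port VLAN and restore the entries recorded in the vport VLAN list.
 */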
10416 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10417                                             u16 port_base_vlan_state,
10418                                             struct hclge_vlan_info *new_info,
10419                                             struct hclge_vlan_info *old_info)
10420 {
10421         struct hclge_dev *hdev = vport->back;
10422         int ret;
10423
10424         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10425                 hclge_rm_vport_all_vlan_table(vport, false);
10426                 /* force clear VLAN 0 */
10427                 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10428                 if (ret)
10429                         return ret;
10430                 return hclge_set_vlan_filter_hw(hdev,
10431                                                  htons(new_info->vlan_proto),
10432                                                  vport->vport_id,
10433                                                  new_info->vlan_tag,
10434                                                  false);
10435         }
10436
10437         /* force add VLAN 0 */
10438         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10439         if (ret)
10440                 return ret;
10441
10442         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10443                                        vport->vport_id, old_info->vlan_tag,
10444                                        true);
10445         if (ret)
10446                 return ret;
10447
10448         return hclge_add_vport_all_vlan_table(vport);
10449 }
10450
10451 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10452                                           const struct hclge_vlan_info *old_cfg)
10453 {
10454         if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10455                 return true;
10456
10457         if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10458                 return true;
10459
10460         return false;
10461 }
10462
10463 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10464                                     struct hclge_vlan_info *vlan_info)
10465 {
10466         struct hnae3_handle *nic = &vport->nic;
10467         struct hclge_vlan_info *old_vlan_info;
10468         struct hclge_dev *hdev = vport->back;
10469         int ret;
10470
10471         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10472
10473         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10474                                      vlan_info->qos);
10475         if (ret)
10476                 return ret;
10477
10478         if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10479                 goto out;
10480
10481         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10482                 /* add new VLAN tag */
10483                 ret = hclge_set_vlan_filter_hw(hdev,
10484                                                htons(vlan_info->vlan_proto),
10485                                                vport->vport_id,
10486                                                vlan_info->vlan_tag,
10487                                                false);
10488                 if (ret)
10489                         return ret;
10490
10491                 /* remove old VLAN tag */
10492                 if (old_vlan_info->vlan_tag == 0)
10493                         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10494                                                        true, 0);
10495                 else
10496                         ret = hclge_set_vlan_filter_hw(hdev,
10497                                                        htons(ETH_P_8021Q),
10498                                                        vport->vport_id,
10499                                                        old_vlan_info->vlan_tag,
10500                                                        true);
10501                 if (ret) {
10502                         dev_err(&hdev->pdev->dev,
10503                                 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10504                                 vport->vport_id, old_vlan_info->vlan_tag, ret);
10505                         return ret;
10506                 }
10507
10508                 goto out;
10509         }
10510
10511         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10512                                                old_vlan_info);
10513         if (ret)
10514                 return ret;
10515
10516 out:
10517         vport->port_base_vlan_cfg.state = state;
10518         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10519                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10520         else
10521                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10522
10523         vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10524         hclge_set_vport_vlan_fltr_change(vport);
10525
10526         return 0;
10527 }
10528
10529 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10530                                           enum hnae3_port_base_vlan_state state,
10531                                           u16 vlan, u8 qos)
10532 {
10533         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10534                 if (!vlan && !qos)
10535                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10536
10537                 return HNAE3_PORT_BASE_VLAN_ENABLE;
10538         }
10539
10540         if (!vlan && !qos)
10541                 return HNAE3_PORT_BASE_VLAN_DISABLE;
10542
10543         if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10544             vport->port_base_vlan_cfg.vlan_info.qos == qos)
10545                 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10546
10547         return HNAE3_PORT_BASE_VLAN_MODIFY;
10548 }
10549
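/* Handle a PF request to set the port based VLAN (VLAN id and qos) of a vf:
 * validate the parameters, work out the new port based VLAN state, apply it,
 * and on pre-V3 devices push the new state to an alive vf.
 */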
10550 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10551                                     u16 vlan, u8 qos, __be16 proto)
10552 {
10553         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10554         struct hclge_vport *vport = hclge_get_vport(handle);
10555         struct hclge_dev *hdev = vport->back;
10556         struct hclge_vlan_info vlan_info;
10557         u16 state;
10558         int ret;
10559
10560         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10561                 return -EOPNOTSUPP;
10562
10563         vport = hclge_get_vf_vport(hdev, vfid);
10564         if (!vport)
10565                 return -EINVAL;
10566
10567         /* qos is a 3-bit value, so it cannot be bigger than 7 */
10568         if (vlan > VLAN_N_VID - 1 || qos > 7)
10569                 return -EINVAL;
10570         if (proto != htons(ETH_P_8021Q))
10571                 return -EPROTONOSUPPORT;
10572
10573         state = hclge_get_port_base_vlan_state(vport,
10574                                                vport->port_base_vlan_cfg.state,
10575                                                vlan, qos);
10576         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10577                 return 0;
10578
10579         vlan_info.vlan_tag = vlan;
10580         vlan_info.qos = qos;
10581         vlan_info.vlan_proto = ntohs(proto);
10582
10583         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10584         if (ret) {
10585                 dev_err(&hdev->pdev->dev,
10586                         "failed to update port base vlan for vf %d, ret = %d\n",
10587                         vfid, ret);
10588                 return ret;
10589         }
10590
10591         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10592          * VLAN state.
10593          */
10594         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10595             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10596                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10597                                                   vport->vport_id, state,
10598                                                   &vlan_info);
10599
10600         return 0;
10601 }
10602
10603 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10604 {
10605         struct hclge_vlan_info *vlan_info;
10606         struct hclge_vport *vport;
10607         int ret;
10608         int vf;
10609
10610         /* clear port base vlan for all vfs */
10611         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10612                 vport = &hdev->vport[vf];
10613                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10614
10615                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10616                                                vport->vport_id,
10617                                                vlan_info->vlan_tag, true);
10618                 if (ret)
10619                         dev_err(&hdev->pdev->dev,
10620                                 "failed to clear vf vlan for vf%d, ret = %d\n",
10621                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10622         }
10623 }
10624
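/* Add or remove a VLAN for a vport. The hardware table is only touched while
 * port based VLAN is disabled; otherwise the VLAN is just tracked in the
 * vport VLAN list. Failed deletions are recorded so they can be retried.
 */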
10625 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10626                           u16 vlan_id, bool is_kill)
10627 {
10628         struct hclge_vport *vport = hclge_get_vport(handle);
10629         struct hclge_dev *hdev = vport->back;
10630         bool writen_to_tbl = false;
10631         int ret = 0;
10632
10633         /* When the device is resetting or the reset has failed, firmware is
10634          * unable to handle the mailbox. Just record the vlan id, and remove
10635          * it after the reset finishes.
10636          */
10637         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10638              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10639                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10640                 return -EBUSY;
10641         }
10642
10643         /* when port base vlan is enabled, we use the port base vlan as the
10644          * vlan filter entry. In this case, we don't update the vlan filter
10645          * table when the user adds or removes a vlan, we just update the
10646          * vport vlan list. The vlan ids in that list are only written to the
10647          * vlan filter table once port base vlan is disabled.
10648          */
10649         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10650                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10651                                                vlan_id, is_kill);
10652                 writen_to_tbl = true;
10653         }
10654
10655         if (!ret) {
10656                 if (is_kill)
10657                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10658                 else
10659                         hclge_add_vport_vlan_table(vport, vlan_id,
10660                                                    writen_to_tbl);
10661         } else if (is_kill) {
10662                 /* when removing the hw vlan filter fails, record the vlan
10663                  * id and try to remove it from hw later, to stay consistent
10664                  * with the stack
10665                  */
10666                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10667         }
10668
10669         hclge_set_vport_vlan_fltr_change(vport);
10670
10671         return ret;
10672 }
10673
10674 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10675 {
10676         struct hclge_vport *vport;
10677         int ret;
10678         u16 i;
10679
10680         for (i = 0; i < hdev->num_alloc_vport; i++) {
10681                 vport = &hdev->vport[i];
10682                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10683                                         &vport->state))
10684                         continue;
10685
10686                 ret = hclge_enable_vport_vlan_filter(vport,
10687                                                      vport->req_vlan_fltr_en);
10688                 if (ret) {
10689                         dev_err(&hdev->pdev->dev,
10690                                 "failed to sync vlan filter state for vport%u, ret = %d\n",
10691                                 vport->vport_id, ret);
10692                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10693                                 &vport->state);
10694                         return;
10695                 }
10696         }
10697 }
10698
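/* Service task helper: retry VLAN deletions that failed earlier (e.g. during
 * reset) for every vport, bounded by HCLGE_MAX_SYNC_COUNT per run, then
 * synchronize the per-vport VLAN filter enable state.
 */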
10699 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10700 {
10701 #define HCLGE_MAX_SYNC_COUNT    60
10702
10703         int i, ret, sync_cnt = 0;
10704         u16 vlan_id;
10705
10706         /* start from vport 1 for PF is always alive */
10707         for (i = 0; i < hdev->num_alloc_vport; i++) {
10708                 struct hclge_vport *vport = &hdev->vport[i];
10709
10710                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10711                                          VLAN_N_VID);
10712                 while (vlan_id != VLAN_N_VID) {
10713                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10714                                                        vport->vport_id, vlan_id,
10715                                                        true);
10716                         if (ret && ret != -EINVAL)
10717                                 return;
10718
10719                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10720                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10721                         hclge_set_vport_vlan_fltr_change(vport);
10722
10723                         sync_cnt++;
10724                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10725                                 return;
10726
10727                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10728                                                  VLAN_N_VID);
10729                 }
10730         }
10731
10732         hclge_sync_vlan_fltr_state(hdev);
10733 }
10734
10735 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10736 {
10737         struct hclge_config_max_frm_size_cmd *req;
10738         struct hclge_desc desc;
10739
10740         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10741
10742         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10743         req->max_frm_size = cpu_to_le16(new_mps);
10744         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10745
10746         return hclge_cmd_send(&hdev->hw, &desc, 1);
10747 }
10748
10749 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10750 {
10751         struct hclge_vport *vport = hclge_get_vport(handle);
10752
10753         return hclge_set_vport_mtu(vport, new_mtu);
10754 }
10755
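/* Change the MTU of a vport. For a VF only the software mps is updated and it
 * must not exceed the PF's; for the PF the MAC max frame size and the packet
 * buffer allocation are reconfigured while the client is stopped.
 */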
10756 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10757 {
10758         struct hclge_dev *hdev = vport->back;
10759         int i, max_frm_size, ret;
10760
10761         /* HW supports 2 layers of vlan */
10762         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10763         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10764             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10765                 return -EINVAL;
10766
10767         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10768         mutex_lock(&hdev->vport_lock);
10769         /* VF's mps must fit within hdev->mps */
10770         if (vport->vport_id && max_frm_size > hdev->mps) {
10771                 mutex_unlock(&hdev->vport_lock);
10772                 return -EINVAL;
10773         } else if (vport->vport_id) {
10774                 vport->mps = max_frm_size;
10775                 mutex_unlock(&hdev->vport_lock);
10776                 return 0;
10777         }
10778
10779         /* PF's mps must not be less than any VF's mps */
10780         for (i = 1; i < hdev->num_alloc_vport; i++)
10781                 if (max_frm_size < hdev->vport[i].mps) {
10782                         mutex_unlock(&hdev->vport_lock);
10783                         return -EINVAL;
10784                 }
10785
10786         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10787
10788         ret = hclge_set_mac_mtu(hdev, max_frm_size);
10789         if (ret) {
10790                 dev_err(&hdev->pdev->dev,
10791                         "Change mtu fail, ret =%d\n", ret);
10792                 goto out;
10793         }
10794
10795         hdev->mps = max_frm_size;
10796         vport->mps = max_frm_size;
10797
10798         ret = hclge_buffer_alloc(hdev);
10799         if (ret)
10800                 dev_err(&hdev->pdev->dev,
10801                         "Allocate buffer fail, ret =%d\n", ret);
10802
10803 out:
10804         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10805         mutex_unlock(&hdev->vport_lock);
10806         return ret;
10807 }
10808
10809 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10810                                     bool enable)
10811 {
10812         struct hclge_reset_tqp_queue_cmd *req;
10813         struct hclge_desc desc;
10814         int ret;
10815
10816         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10817
10818         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10819         req->tqp_id = cpu_to_le16(queue_id);
10820         if (enable)
10821                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10822
10823         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10824         if (ret) {
10825                 dev_err(&hdev->pdev->dev,
10826                         "Send tqp reset cmd error, status =%d\n", ret);
10827                 return ret;
10828         }
10829
10830         return 0;
10831 }
10832
10833 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10834                                   u8 *reset_status)
10835 {
10836         struct hclge_reset_tqp_queue_cmd *req;
10837         struct hclge_desc desc;
10838         int ret;
10839
10840         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10841
10842         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10843         req->tqp_id = cpu_to_le16(queue_id);
10844
10845         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10846         if (ret) {
10847                 dev_err(&hdev->pdev->dev,
10848                         "Get reset status error, status =%d\n", ret);
10849                 return ret;
10850         }
10851
10852         *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10853
10854         return 0;
10855 }
10856
10857 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10858 {
10859         struct hnae3_queue *queue;
10860         struct hclge_tqp *tqp;
10861
10862         queue = handle->kinfo.tqp[queue_id];
10863         tqp = container_of(queue, struct hclge_tqp, q);
10864
10865         return tqp->index;
10866 }
10867
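/* Reset each tqp of the handle with the TQP reset command: assert the reset,
 * poll until hardware reports the tqp reset has completed, then deassert it.
 */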
10868 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10869 {
10870         struct hclge_vport *vport = hclge_get_vport(handle);
10871         struct hclge_dev *hdev = vport->back;
10872         u16 reset_try_times = 0;
10873         u8 reset_status;
10874         u16 queue_gid;
10875         int ret;
10876         u16 i;
10877
10878         for (i = 0; i < handle->kinfo.num_tqps; i++) {
10879                 queue_gid = hclge_covert_handle_qid_global(handle, i);
10880                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10881                 if (ret) {
10882                         dev_err(&hdev->pdev->dev,
10883                                 "failed to send reset tqp cmd, ret = %d\n",
10884                                 ret);
10885                         return ret;
10886                 }
10887
10888                 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10889                         ret = hclge_get_reset_status(hdev, queue_gid,
10890                                                      &reset_status);
10891                         if (ret)
10892                                 return ret;
10893
10894                         if (reset_status)
10895                                 break;
10896
10897                         /* Wait for tqp hw reset */
10898                         usleep_range(1000, 1200);
10899                 }
10900
10901                 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10902                         dev_err(&hdev->pdev->dev,
10903                                 "wait for tqp hw reset timeout\n");
10904                         return -ETIME;
10905                 }
10906
10907                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10908                 if (ret) {
10909                         dev_err(&hdev->pdev->dev,
10910                                 "failed to deassert soft reset, ret = %d\n",
10911                                 ret);
10912                         return ret;
10913                 }
10914                 reset_try_times = 0;
10915         }
10916         return 0;
10917 }
10918
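/* Reset the RCB of all the handle's tqps with a single command; if firmware
 * reports the command is not supported, fall back to resetting each tqp
 * individually.
 */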
10919 static int hclge_reset_rcb(struct hnae3_handle *handle)
10920 {
10921 #define HCLGE_RESET_RCB_NOT_SUPPORT     0U
10922 #define HCLGE_RESET_RCB_SUCCESS         1U
10923
10924         struct hclge_vport *vport = hclge_get_vport(handle);
10925         struct hclge_dev *hdev = vport->back;
10926         struct hclge_reset_cmd *req;
10927         struct hclge_desc desc;
10928         u8 return_status;
10929         u16 queue_gid;
10930         int ret;
10931
10932         queue_gid = hclge_covert_handle_qid_global(handle, 0);
10933
10934         req = (struct hclge_reset_cmd *)desc.data;
10935         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10936         hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10937         req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10938         req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10939
10940         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10941         if (ret) {
10942                 dev_err(&hdev->pdev->dev,
10943                         "failed to send rcb reset cmd, ret = %d\n", ret);
10944                 return ret;
10945         }
10946
10947         return_status = req->fun_reset_rcb_return_status;
10948         if (return_status == HCLGE_RESET_RCB_SUCCESS)
10949                 return 0;
10950
10951         if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10952                 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10953                         return_status);
10954                 return -EIO;
10955         }
10956
10957         /* if the reset rcb cmd is unsupported, fall back to resetting each
10958          * tqp individually with the reset tqp cmd
10959          */
10960         return hclge_reset_tqp_cmd(handle);
10961 }
10962
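/* Reset all tqps of a handle: for the PF (vport 0) the tqps are disabled
 * first, then the RCB reset is requested; hclge_reset_rcb() falls back to
 * per-queue resets when the firmware does not support the RCB reset cmd.
 */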
10963 int hclge_reset_tqp(struct hnae3_handle *handle)
10964 {
10965         struct hclge_vport *vport = hclge_get_vport(handle);
10966         struct hclge_dev *hdev = vport->back;
10967         int ret;
10968
10969         /* only need to disable the PF's tqps */
10970         if (!vport->vport_id) {
10971                 ret = hclge_tqp_enable(handle, false);
10972                 if (ret) {
10973                         dev_err(&hdev->pdev->dev,
10974                                 "failed to disable tqp, ret = %d\n", ret);
10975                         return ret;
10976                 }
10977         }
10978
10979         return hclge_reset_rcb(handle);
10980 }
10981
10982 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10983 {
10984         struct hclge_vport *vport = hclge_get_vport(handle);
10985         struct hclge_dev *hdev = vport->back;
10986
10987         return hdev->fw_version;
10988 }
10989
10990 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10991 {
10992         struct phy_device *phydev = hdev->hw.mac.phydev;
10993
10994         if (!phydev)
10995                 return;
10996
10997         phy_set_asym_pause(phydev, rx_en, tx_en);
10998 }
10999
11000 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
11001 {
11002         int ret;
11003
11004         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
11005                 return 0;
11006
11007         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
11008         if (ret)
11009                 dev_err(&hdev->pdev->dev,
11010                         "configure pauseparam error, ret = %d.\n", ret);
11011
11012         return ret;
11013 }
11014
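/* Resolve MAC pause settings from the PHY autoneg result: combine the local
 * advertisement with the link partner's pause bits, force pause off on half
 * duplex links, and program the resolved rx/tx setting into the MAC.
 */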
11015 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
11016 {
11017         struct phy_device *phydev = hdev->hw.mac.phydev;
11018         u16 remote_advertising = 0;
11019         u16 local_advertising;
11020         u32 rx_pause, tx_pause;
11021         u8 flowctl;
11022
11023         if (!phydev->link || !phydev->autoneg)
11024                 return 0;
11025
11026         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
11027
11028         if (phydev->pause)
11029                 remote_advertising = LPA_PAUSE_CAP;
11030
11031         if (phydev->asym_pause)
11032                 remote_advertising |= LPA_PAUSE_ASYM;
11033
11034         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
11035                                            remote_advertising);
11036         tx_pause = flowctl & FLOW_CTRL_TX;
11037         rx_pause = flowctl & FLOW_CTRL_RX;
11038
11039         if (phydev->duplex == HCLGE_MAC_HALF) {
11040                 tx_pause = 0;
11041                 rx_pause = 0;
11042         }
11043
11044         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
11045 }
11046
11047 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
11048                                  u32 *rx_en, u32 *tx_en)
11049 {
11050         struct hclge_vport *vport = hclge_get_vport(handle);
11051         struct hclge_dev *hdev = vport->back;
11052         u8 media_type = hdev->hw.mac.media_type;
11053
11054         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
11055                     hclge_get_autoneg(handle) : 0;
11056
11057         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11058                 *rx_en = 0;
11059                 *tx_en = 0;
11060                 return;
11061         }
11062
11063         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
11064                 *rx_en = 1;
11065                 *tx_en = 0;
11066         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
11067                 *tx_en = 1;
11068                 *rx_en = 0;
11069         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
11070                 *rx_en = 1;
11071                 *tx_en = 1;
11072         } else {
11073                 *rx_en = 0;
11074                 *tx_en = 0;
11075         }
11076 }
11077
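/* Translate the user requested rx/tx pause enables into the matching fc_mode,
 * record it as the last user configured mode and apply it to tm_info.
 */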
11078 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
11079                                          u32 rx_en, u32 tx_en)
11080 {
11081         if (rx_en && tx_en)
11082                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
11083         else if (rx_en && !tx_en)
11084                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
11085         else if (!rx_en && tx_en)
11086                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
11087         else
11088                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
11089
11090         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
11091 }
11092
11093 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
11094                                 u32 rx_en, u32 tx_en)
11095 {
11096         struct hclge_vport *vport = hclge_get_vport(handle);
11097         struct hclge_dev *hdev = vport->back;
11098         struct phy_device *phydev = hdev->hw.mac.phydev;
11099         u32 fc_autoneg;
11100
11101         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11102                 fc_autoneg = hclge_get_autoneg(handle);
11103                 if (auto_neg != fc_autoneg) {
11104                         dev_info(&hdev->pdev->dev,
11105                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11106                         return -EOPNOTSUPP;
11107                 }
11108         }
11109
11110         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11111                 dev_info(&hdev->pdev->dev,
11112                          "Priority flow control enabled. Cannot set link flow control.\n");
11113                 return -EOPNOTSUPP;
11114         }
11115
11116         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11117
11118         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11119
11120         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11121                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11122
11123         if (phydev)
11124                 return phy_start_aneg(phydev);
11125
11126         return -EOPNOTSUPP;
11127 }
11128
11129 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11130                                           u8 *auto_neg, u32 *speed, u8 *duplex)
11131 {
11132         struct hclge_vport *vport = hclge_get_vport(handle);
11133         struct hclge_dev *hdev = vport->back;
11134
11135         if (speed)
11136                 *speed = hdev->hw.mac.speed;
11137         if (duplex)
11138                 *duplex = hdev->hw.mac.duplex;
11139         if (auto_neg)
11140                 *auto_neg = hdev->hw.mac.autoneg;
11141 }
11142
11143 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11144                                  u8 *module_type)
11145 {
11146         struct hclge_vport *vport = hclge_get_vport(handle);
11147         struct hclge_dev *hdev = vport->back;
11148
11149         /* When the nic is down, the service task is not running and does
11150          * not update the port information periodically. Query the port
11151          * information here to ensure the media type returned is correct.
11152          */
11153         hclge_update_port_info(hdev);
11154
11155         if (media_type)
11156                 *media_type = hdev->hw.mac.media_type;
11157
11158         if (module_type)
11159                 *module_type = hdev->hw.mac.module_type;
11160 }
11161
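/* Report MDI/MDI-X state: switch the PHY to the MDIX register page, read the
 * control and status registers, then restore the copper page before mapping
 * the result to the ethtool ETH_TP_MDI* values.
 */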
11162 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11163                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11164 {
11165         struct hclge_vport *vport = hclge_get_vport(handle);
11166         struct hclge_dev *hdev = vport->back;
11167         struct phy_device *phydev = hdev->hw.mac.phydev;
11168         int mdix_ctrl, mdix, is_resolved;
11169         unsigned int retval;
11170
11171         if (!phydev) {
11172                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11173                 *tp_mdix = ETH_TP_MDI_INVALID;
11174                 return;
11175         }
11176
11177         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11178
11179         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11180         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11181                                     HCLGE_PHY_MDIX_CTRL_S);
11182
11183         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11184         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11185         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11186
11187         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11188
11189         switch (mdix_ctrl) {
11190         case 0x0:
11191                 *tp_mdix_ctrl = ETH_TP_MDI;
11192                 break;
11193         case 0x1:
11194                 *tp_mdix_ctrl = ETH_TP_MDI_X;
11195                 break;
11196         case 0x3:
11197                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11198                 break;
11199         default:
11200                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11201                 break;
11202         }
11203
11204         if (!is_resolved)
11205                 *tp_mdix = ETH_TP_MDI_INVALID;
11206         else if (mdix)
11207                 *tp_mdix = ETH_TP_MDI_X;
11208         else
11209                 *tp_mdix = ETH_TP_MDI;
11210 }
11211
11212 static void hclge_info_show(struct hclge_dev *hdev)
11213 {
11214         struct device *dev = &hdev->pdev->dev;
11215
11216         dev_info(dev, "PF info begin:\n");
11217
11218         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11219         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11220         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11221         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11222         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11223         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11224         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11225         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11226         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11227         dev_info(dev, "This is %s PF\n",
11228                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11229         dev_info(dev, "DCB %s\n",
11230                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11231         dev_info(dev, "MQPRIO %s\n",
11232                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11233         dev_info(dev, "Default tx spare buffer size: %u\n",
11234                  hdev->tx_spare_buf_size);
11235
11236         dev_info(dev, "PF info end.\n");
11237 }
11238
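/* Bring up the NIC client instance for a vport. If a reset started while the
 * client was being initialized, wait for it to finish, undo the registration
 * and return -EBUSY.
 */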
11239 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11240                                           struct hclge_vport *vport)
11241 {
11242         struct hnae3_client *client = vport->nic.client;
11243         struct hclge_dev *hdev = ae_dev->priv;
11244         int rst_cnt = hdev->rst_stats.reset_cnt;
11245         int ret;
11246
11247         ret = client->ops->init_instance(&vport->nic);
11248         if (ret)
11249                 return ret;
11250
11251         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11252         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11253             rst_cnt != hdev->rst_stats.reset_cnt) {
11254                 ret = -EBUSY;
11255                 goto init_nic_err;
11256         }
11257
11258         /* Enable nic hw error interrupts */
11259         ret = hclge_config_nic_hw_error(hdev, true);
11260         if (ret) {
11261                 dev_err(&ae_dev->pdev->dev,
11262                         "fail(%d) to enable hw error interrupts\n", ret);
11263                 goto init_nic_err;
11264         }
11265
11266         hnae3_set_client_init_flag(client, ae_dev, 1);
11267
11268         if (netif_msg_drv(&hdev->vport->nic))
11269                 hclge_info_show(hdev);
11270
11271         return ret;
11272
11273 init_nic_err:
11274         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11275         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11276                 msleep(HCLGE_WAIT_RESET_DONE);
11277
11278         client->ops->uninit_instance(&vport->nic, 0);
11279
11280         return ret;
11281 }
11282
11283 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11284                                            struct hclge_vport *vport)
11285 {
11286         struct hclge_dev *hdev = ae_dev->priv;
11287         struct hnae3_client *client;
11288         int rst_cnt;
11289         int ret;
11290
11291         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11292             !hdev->nic_client)
11293                 return 0;
11294
11295         client = hdev->roce_client;
11296         ret = hclge_init_roce_base_info(vport);
11297         if (ret)
11298                 return ret;
11299
11300         rst_cnt = hdev->rst_stats.reset_cnt;
11301         ret = client->ops->init_instance(&vport->roce);
11302         if (ret)
11303                 return ret;
11304
11305         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11306         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11307             rst_cnt != hdev->rst_stats.reset_cnt) {
11308                 ret = -EBUSY;
11309                 goto init_roce_err;
11310         }
11311
11312         /* Enable roce ras interrupts */
11313         ret = hclge_config_rocee_ras_interrupt(hdev, true);
11314         if (ret) {
11315                 dev_err(&ae_dev->pdev->dev,
11316                         "fail(%d) to enable roce ras interrupts\n", ret);
11317                 goto init_roce_err;
11318         }
11319
11320         hnae3_set_client_init_flag(client, ae_dev, 1);
11321
11322         return 0;
11323
11324 init_roce_err:
11325         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11326         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11327                 msleep(HCLGE_WAIT_RESET_DONE);
11328
11329         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11330
11331         return ret;
11332 }
11333
11334 static int hclge_init_client_instance(struct hnae3_client *client,
11335                                       struct hnae3_ae_dev *ae_dev)
11336 {
11337         struct hclge_dev *hdev = ae_dev->priv;
11338         struct hclge_vport *vport = &hdev->vport[0];
11339         int ret;
11340
11341         switch (client->type) {
11342         case HNAE3_CLIENT_KNIC:
11343                 hdev->nic_client = client;
11344                 vport->nic.client = client;
11345                 ret = hclge_init_nic_client_instance(ae_dev, vport);
11346                 if (ret)
11347                         goto clear_nic;
11348
11349                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11350                 if (ret)
11351                         goto clear_roce;
11352
11353                 break;
11354         case HNAE3_CLIENT_ROCE:
11355                 if (hnae3_dev_roce_supported(hdev)) {
11356                         hdev->roce_client = client;
11357                         vport->roce.client = client;
11358                 }
11359
11360                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11361                 if (ret)
11362                         goto clear_roce;
11363
11364                 break;
11365         default:
11366                 return -EINVAL;
11367         }
11368
11369         return 0;
11370
11371 clear_nic:
11372         hdev->nic_client = NULL;
11373         vport->nic.client = NULL;
11374         return ret;
11375 clear_roce:
11376         hdev->roce_client = NULL;
11377         vport->roce.client = NULL;
11378         return ret;
11379 }
11380
11381 static void hclge_uninit_client_instance(struct hnae3_client *client,
11382                                          struct hnae3_ae_dev *ae_dev)
11383 {
11384         struct hclge_dev *hdev = ae_dev->priv;
11385         struct hclge_vport *vport = &hdev->vport[0];
11386
11387         if (hdev->roce_client) {
11388                 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11389                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11390                         msleep(HCLGE_WAIT_RESET_DONE);
11391
11392                 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11393                 hdev->roce_client = NULL;
11394                 vport->roce.client = NULL;
11395         }
11396         if (client->type == HNAE3_CLIENT_ROCE)
11397                 return;
11398         if (hdev->nic_client && client->ops->uninit_instance) {
11399                 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11400                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11401                         msleep(HCLGE_WAIT_RESET_DONE);
11402
11403                 client->ops->uninit_instance(&vport->nic, 0);
11404                 hdev->nic_client = NULL;
11405                 vport->nic.client = NULL;
11406         }
11407 }
11408
11409 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11410 {
11411 #define HCLGE_MEM_BAR           4
11412
11413         struct pci_dev *pdev = hdev->pdev;
11414         struct hclge_hw *hw = &hdev->hw;
11415
11416         /* for devices that do not have device memory, return directly */
11417         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11418                 return 0;
11419
11420         hw->mem_base = devm_ioremap_wc(&pdev->dev,
11421                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
11422                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
11423         if (!hw->mem_base) {
11424                 dev_err(&pdev->dev, "failed to map device memory\n");
11425                 return -EFAULT;
11426         }
11427
11428         return 0;
11429 }
11430
11431 static int hclge_pci_init(struct hclge_dev *hdev)
11432 {
11433         struct pci_dev *pdev = hdev->pdev;
11434         struct hclge_hw *hw;
11435         int ret;
11436
11437         ret = pci_enable_device(pdev);
11438         if (ret) {
11439                 dev_err(&pdev->dev, "failed to enable PCI device\n");
11440                 return ret;
11441         }
11442
11443         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11444         if (ret) {
11445                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11446                 if (ret) {
11447                         dev_err(&pdev->dev,
11448                                 "can't set consistent PCI DMA");
11449                         goto err_disable_device;
11450                 }
11451                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11452         }
11453
11454         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11455         if (ret) {
11456                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11457                 goto err_disable_device;
11458         }
11459
11460         pci_set_master(pdev);
11461         hw = &hdev->hw;
11462         hw->io_base = pcim_iomap(pdev, 2, 0);
11463         if (!hw->io_base) {
11464                 dev_err(&pdev->dev, "Can't map configuration register space\n");
11465                 ret = -ENOMEM;
11466                 goto err_clr_master;
11467         }
11468
11469         ret = hclge_dev_mem_map(hdev);
11470         if (ret)
11471                 goto err_unmap_io_base;
11472
11473         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11474
11475         return 0;
11476
11477 err_unmap_io_base:
11478         pcim_iounmap(pdev, hdev->hw.io_base);
11479 err_clr_master:
11480         pci_clear_master(pdev);
11481         pci_release_regions(pdev);
11482 err_disable_device:
11483         pci_disable_device(pdev);
11484
11485         return ret;
11486 }
11487
11488 static void hclge_pci_uninit(struct hclge_dev *hdev)
11489 {
11490         struct pci_dev *pdev = hdev->pdev;
11491
11492         if (hdev->hw.mem_base)
11493                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11494
11495         pcim_iounmap(pdev, hdev->hw.io_base);
11496         pci_free_irq_vectors(pdev);
11497         pci_clear_master(pdev);
11498         pci_release_mem_regions(pdev);
11499         pci_disable_device(pdev);
11500 }
11501
11502 static void hclge_state_init(struct hclge_dev *hdev)
11503 {
11504         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11505         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11506         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11507         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11508         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11509         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11510         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11511 }
11512
11513 static void hclge_state_uninit(struct hclge_dev *hdev)
11514 {
11515         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11516         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11517
11518         if (hdev->reset_timer.function)
11519                 del_timer_sync(&hdev->reset_timer);
11520         if (hdev->service_task.work.func)
11521                 cancel_delayed_work_sync(&hdev->service_task);
11522 }
11523
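/* Prepare the device for an externally driven reset (e.g. FLR). If the
 * preparation fails or another reset is still pending, release the reset
 * semaphore and retry a limited number of times before carrying on.
 */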
11524 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11525                                         enum hnae3_reset_type rst_type)
11526 {
11527 #define HCLGE_RESET_RETRY_WAIT_MS       500
11528 #define HCLGE_RESET_RETRY_CNT   5
11529
11530         struct hclge_dev *hdev = ae_dev->priv;
11531         int retry_cnt = 0;
11532         int ret;
11533
11534 retry:
11535         down(&hdev->reset_sem);
11536         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11537         hdev->reset_type = rst_type;
11538         ret = hclge_reset_prepare(hdev);
11539         if (ret || hdev->reset_pending) {
11540                 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11541                         ret);
11542                 if (hdev->reset_pending ||
11543                     retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11544                         dev_err(&hdev->pdev->dev,
11545                                 "reset_pending:0x%lx, retry_cnt:%d\n",
11546                                 hdev->reset_pending, retry_cnt);
11547                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11548                         up(&hdev->reset_sem);
11549                         msleep(HCLGE_RESET_RETRY_WAIT_MS);
11550                         goto retry;
11551                 }
11552         }
11553
11554         /* disable misc vector before reset done */
11555         hclge_enable_vector(&hdev->misc_vector, false);
11556         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11557
11558         if (hdev->reset_type == HNAE3_FLR_RESET)
11559                 hdev->rst_stats.flr_rst_cnt++;
11560 }
11561
11562 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11563 {
11564         struct hclge_dev *hdev = ae_dev->priv;
11565         int ret;
11566
11567         hclge_enable_vector(&hdev->misc_vector, true);
11568
11569         ret = hclge_reset_rebuild(hdev);
11570         if (ret)
11571                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11572
11573         hdev->reset_type = HNAE3_NONE_RESET;
11574         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11575         up(&hdev->reset_sem);
11576 }
11577
11578 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11579 {
11580         u16 i;
11581
11582         for (i = 0; i < hdev->num_alloc_vport; i++) {
11583                 struct hclge_vport *vport = &hdev->vport[i];
11584                 int ret;
11585
11586                  /* Send cmd to clear vport's FUNC_RST_ING */
11587                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11588                 if (ret)
11589                         dev_warn(&hdev->pdev->dev,
11590                                  "clear vport(%u) rst failed %d!\n",
11591                                  vport->vport_id, ret);
11592         }
11593 }
11594
11595 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11596 {
11597         struct hclge_desc desc;
11598         int ret;
11599
11600         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11601
11602         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11603         /* This new command is only supported by new firmware; it will
11604          * fail with older firmware. The error value -EOPNOTSUPP can only
11605          * be returned by older firmware running this command, so to keep
11606          * the code backward compatible we override this value and return
11607          * success.
11608          */
11609         if (ret && ret != -EOPNOTSUPP) {
11610                 dev_err(&hdev->pdev->dev,
11611                         "failed to clear hw resource, ret = %d\n", ret);
11612                 return ret;
11613         }
11614         return 0;
11615 }
11616
11617 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11618 {
11619         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11620                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11621 }
11622
11623 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11624 {
11625         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11626                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11627 }
11628
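/* Main PF initialization: set up PCI resources, devlink and the firmware
 * command queue, allocate MSI-X/tqps/vports, then configure MAC, VLAN, TM,
 * RSS and the flow director before setting up the reset timer and starting
 * the periodic service task.
 */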
11629 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11630 {
11631         struct pci_dev *pdev = ae_dev->pdev;
11632         struct hclge_dev *hdev;
11633         int ret;
11634
11635         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11636         if (!hdev)
11637                 return -ENOMEM;
11638
11639         hdev->pdev = pdev;
11640         hdev->ae_dev = ae_dev;
11641         hdev->reset_type = HNAE3_NONE_RESET;
11642         hdev->reset_level = HNAE3_FUNC_RESET;
11643         ae_dev->priv = hdev;
11644
11645         /* HW supports 2-layer VLAN */
11646         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11647
11648         mutex_init(&hdev->vport_lock);
11649         spin_lock_init(&hdev->fd_rule_lock);
11650         sema_init(&hdev->reset_sem, 1);
11651
11652         ret = hclge_pci_init(hdev);
11653         if (ret)
11654                 goto out;
11655
11656         ret = hclge_devlink_init(hdev);
11657         if (ret)
11658                 goto err_pci_uninit;
11659
11660         /* Initialize the firmware command queue */
11661         ret = hclge_cmd_queue_init(hdev);
11662         if (ret)
11663                 goto err_devlink_uninit;
11664
11665         /* Initialize the firmware command */
11666         ret = hclge_cmd_init(hdev);
11667         if (ret)
11668                 goto err_cmd_uninit;
11669
11670         ret = hclge_clear_hw_resource(hdev);
11671         if (ret)
11672                 goto err_cmd_uninit;
11673
11674         ret = hclge_get_cap(hdev);
11675         if (ret)
11676                 goto err_cmd_uninit;
11677
11678         ret = hclge_query_dev_specs(hdev);
11679         if (ret) {
11680                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11681                         ret);
11682                 goto err_cmd_uninit;
11683         }
11684
11685         ret = hclge_configure(hdev);
11686         if (ret) {
11687                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11688                 goto err_cmd_uninit;
11689         }
11690
11691         ret = hclge_init_msi(hdev);
11692         if (ret) {
11693                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11694                 goto err_cmd_uninit;
11695         }
11696
11697         ret = hclge_misc_irq_init(hdev);
11698         if (ret)
11699                 goto err_msi_uninit;
11700
11701         ret = hclge_alloc_tqps(hdev);
11702         if (ret) {
11703                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11704                 goto err_msi_irq_uninit;
11705         }
11706
11707         ret = hclge_alloc_vport(hdev);
11708         if (ret)
11709                 goto err_msi_irq_uninit;
11710
11711         ret = hclge_map_tqp(hdev);
11712         if (ret)
11713                 goto err_msi_irq_uninit;
11714
11715         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11716             !hnae3_dev_phy_imp_supported(hdev)) {
11717                 ret = hclge_mac_mdio_config(hdev);
11718                 if (ret)
11719                         goto err_msi_irq_uninit;
11720         }
11721
11722         ret = hclge_init_umv_space(hdev);
11723         if (ret)
11724                 goto err_mdiobus_unreg;
11725
11726         ret = hclge_mac_init(hdev);
11727         if (ret) {
11728                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11729                 goto err_mdiobus_unreg;
11730         }
11731
11732         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11733         if (ret) {
11734                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11735                 goto err_mdiobus_unreg;
11736         }
11737
11738         ret = hclge_config_gro(hdev);
11739         if (ret)
11740                 goto err_mdiobus_unreg;
11741
11742         ret = hclge_init_vlan_config(hdev);
11743         if (ret) {
11744                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11745                 goto err_mdiobus_unreg;
11746         }
11747
11748         ret = hclge_tm_schd_init(hdev);
11749         if (ret) {
11750                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11751                 goto err_mdiobus_unreg;
11752         }
11753
11754         ret = hclge_rss_init_cfg(hdev);
11755         if (ret) {
11756                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11757                 goto err_mdiobus_unreg;
11758         }
11759
11760         ret = hclge_rss_init_hw(hdev);
11761         if (ret) {
11762                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11763                 goto err_mdiobus_unreg;
11764         }
11765
11766         ret = init_mgr_tbl(hdev);
11767         if (ret) {
11768                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11769                 goto err_mdiobus_unreg;
11770         }
11771
11772         ret = hclge_init_fd_config(hdev);
11773         if (ret) {
11774                 dev_err(&pdev->dev,
11775                         "fd table init fail, ret=%d\n", ret);
11776                 goto err_mdiobus_unreg;
11777         }
11778
11779         ret = hclge_ptp_init(hdev);
11780         if (ret)
11781                 goto err_mdiobus_unreg;
11782
11783         INIT_KFIFO(hdev->mac_tnl_log);
11784
11785         hclge_dcb_ops_set(hdev);
11786
11787         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11788         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11789
11790         /* Set up affinity after the service timer setup because add_timer_on
11791          * is called in the affinity notify callback.
11792          */
11793         hclge_misc_affinity_setup(hdev);
11794
11795         hclge_clear_all_event_cause(hdev);
11796         hclge_clear_resetting_state(hdev);
11797
11798         /* Log and clear the hw errors that have already occurred */
11799         if (hnae3_dev_ras_imp_supported(hdev))
11800                 hclge_handle_occurred_error(hdev);
11801         else
11802                 hclge_handle_all_hns_hw_errors(ae_dev);
11803
11804         /* request a delayed reset for the error recovery because an immediate
11805          * global reset on a PF may affect pending initialization of other PFs
11806          */
11807         if (ae_dev->hw_err_reset_req) {
11808                 enum hnae3_reset_type reset_level;
11809
11810                 reset_level = hclge_get_reset_level(ae_dev,
11811                                                     &ae_dev->hw_err_reset_req);
11812                 hclge_set_def_reset_request(ae_dev, reset_level);
11813                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11814         }
11815
11816         hclge_init_rxd_adv_layout(hdev);
11817
11818         /* Enable MISC vector(vector0) */
11819         hclge_enable_vector(&hdev->misc_vector, true);
11820
11821         hclge_state_init(hdev);
11822         hdev->last_reset_time = jiffies;
11823
11824         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11825                  HCLGE_DRIVER_NAME);
11826
11827         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11828
11829         return 0;
11830
11831 err_mdiobus_unreg:
11832         if (hdev->hw.mac.phydev)
11833                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11834 err_msi_irq_uninit:
11835         hclge_misc_irq_uninit(hdev);
11836 err_msi_uninit:
11837         pci_free_irq_vectors(pdev);
11838 err_cmd_uninit:
11839         hclge_cmd_uninit(hdev);
11840 err_devlink_uninit:
11841         hclge_devlink_uninit(hdev);
11842 err_pci_uninit:
11843         pcim_iounmap(pdev, hdev->hw.io_base);
11844         pci_clear_master(pdev);
11845         pci_release_regions(pdev);
11846         pci_disable_device(pdev);
11847 out:
11848         mutex_destroy(&hdev->vport_lock);
11849         return ret;
11850 }
11851
11852 static void hclge_stats_clear(struct hclge_dev *hdev)
11853 {
11854         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11855 }
11856
11857 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11858 {
11859         return hclge_config_switch_param(hdev, vf, enable,
11860                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
11861 }
11862
11863 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11864 {
11865         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11866                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
11867                                           enable, vf);
11868 }
11869
11870 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11871 {
11872         int ret;
11873
11874         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11875         if (ret) {
11876                 dev_err(&hdev->pdev->dev,
11877                         "Set vf %d mac spoof check %s failed, ret=%d\n",
11878                         vf, enable ? "on" : "off", ret);
11879                 return ret;
11880         }
11881
11882         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11883         if (ret)
11884                 dev_err(&hdev->pdev->dev,
11885                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11886                         vf, enable ? "on" : "off", ret);
11887
11888         return ret;
11889 }
11890
11891 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11892                                  bool enable)
11893 {
11894         struct hclge_vport *vport = hclge_get_vport(handle);
11895         struct hclge_dev *hdev = vport->back;
11896         u32 new_spoofchk = enable ? 1 : 0;
11897         int ret;
11898
11899         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11900                 return -EOPNOTSUPP;
11901
11902         vport = hclge_get_vf_vport(hdev, vf);
11903         if (!vport)
11904                 return -EINVAL;
11905
11906         if (vport->vf_info.spoofchk == new_spoofchk)
11907                 return 0;
11908
11909         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11910                 dev_warn(&hdev->pdev->dev,
11911                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11912                          vf);
11913         else if (enable && hclge_is_umv_space_full(vport, true))
11914                 dev_warn(&hdev->pdev->dev,
11915                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11916                          vf);
11917
11918         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11919         if (ret)
11920                 return ret;
11921
11922         vport->vf_info.spoofchk = new_spoofchk;
11923         return 0;
11924 }
11925
11926 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11927 {
11928         struct hclge_vport *vport = hdev->vport;
11929         int ret;
11930         int i;
11931
11932         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11933                 return 0;
11934
11935         /* resume the vf spoof check state after reset */
11936         for (i = 0; i < hdev->num_alloc_vport; i++) {
11937                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11938                                                vport->vf_info.spoofchk);
11939                 if (ret)
11940                         return ret;
11941
11942                 vport++;
11943         }
11944
11945         return 0;
11946 }
11947
11948 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11949 {
11950         struct hclge_vport *vport = hclge_get_vport(handle);
11951         struct hclge_dev *hdev = vport->back;
11952         u32 new_trusted = enable ? 1 : 0;
11953
11954         vport = hclge_get_vf_vport(hdev, vf);
11955         if (!vport)
11956                 return -EINVAL;
11957
11958         if (vport->vf_info.trusted == new_trusted)
11959                 return 0;
11960
11961         vport->vf_info.trusted = new_trusted;
11962         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11963         hclge_task_schedule(hdev, 0);
11964
11965         return 0;
11966 }
11967
11968 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11969 {
11970         int ret;
11971         int vf;
11972
11973         /* reset vf rate to default value */
11974         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11975                 struct hclge_vport *vport = &hdev->vport[vf];
11976
11977                 vport->vf_info.max_tx_rate = 0;
11978                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11979                 if (ret)
11980                         dev_err(&hdev->pdev->dev,
11981                                 "vf%d failed to reset to default, ret=%d\n",
11982                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11983         }
11984 }
11985
11986 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11987                                      int min_tx_rate, int max_tx_rate)
11988 {
11989         if (min_tx_rate != 0 ||
11990             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11991                 dev_err(&hdev->pdev->dev,
11992                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11993                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11994                 return -EINVAL;
11995         }
11996
11997         return 0;
11998 }
11999
12000 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
12001                              int min_tx_rate, int max_tx_rate, bool force)
12002 {
12003         struct hclge_vport *vport = hclge_get_vport(handle);
12004         struct hclge_dev *hdev = vport->back;
12005         int ret;
12006
12007         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12008         if (ret)
12009                 return ret;
12010
12011         vport = hclge_get_vf_vport(hdev, vf);
12012         if (!vport)
12013                 return -EINVAL;
12014
12015         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
12016                 return 0;
12017
12018         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
12019         if (ret)
12020                 return ret;
12021
12022         vport->vf_info.max_tx_rate = max_tx_rate;
12023
12024         return 0;
12025 }
12026
12027 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12028 {
12029         struct hnae3_handle *handle = &hdev->vport->nic;
12030         struct hclge_vport *vport;
12031         int ret;
12032         int vf;
12033
12034         /* resume the vf max_tx_rate after reset */
12035         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12036                 vport = hclge_get_vf_vport(hdev, vf);
12037                 if (!vport)
12038                         return -EINVAL;
12039
12040                 /* zero means max rate; after reset, the firmware has already
12041                  * set it to max rate, so just continue.
12042                  */
12043                 if (!vport->vf_info.max_tx_rate)
12044                         continue;
12045
12046                 ret = hclge_set_vf_rate(handle, vf, 0,
12047                                         vport->vf_info.max_tx_rate, true);
12048                 if (ret) {
12049                         dev_err(&hdev->pdev->dev,
12050                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
12051                                 vf, vport->vf_info.max_tx_rate, ret);
12052                         return ret;
12053                 }
12054         }
12055
12056         return 0;
12057 }
12058
12059 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12060 {
12061         struct hclge_vport *vport = hdev->vport;
12062         int i;
12063
12064         for (i = 0; i < hdev->num_alloc_vport; i++) {
12065                 hclge_vport_stop(vport);
12066                 vport++;
12067         }
12068 }
12069
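/* Rebuild the hardware configuration after a reset. For IMP and global
 * resets the driver also clears its VLAN bookkeeping and resets the UMV
 * space, then reinitializes MAC, VLAN, TM, RSS and flow director settings.
 */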
12070 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
12071 {
12072         struct hclge_dev *hdev = ae_dev->priv;
12073         struct pci_dev *pdev = ae_dev->pdev;
12074         int ret;
12075
12076         set_bit(HCLGE_STATE_DOWN, &hdev->state);
12077
12078         hclge_stats_clear(hdev);
12079         /* NOTE: a pf reset does not need to clear or restore the pf and vf
12080          * table entries, so the tables in memory should not be cleaned here.
12081          */
12082         if (hdev->reset_type == HNAE3_IMP_RESET ||
12083             hdev->reset_type == HNAE3_GLOBAL_RESET) {
12084                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12085                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12086                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12087                 hclge_reset_umv_space(hdev);
12088         }
12089
12090         ret = hclge_cmd_init(hdev);
12091         if (ret) {
12092                 dev_err(&pdev->dev, "Cmd queue init failed\n");
12093                 return ret;
12094         }
12095
12096         ret = hclge_map_tqp(hdev);
12097         if (ret) {
12098                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12099                 return ret;
12100         }
12101
12102         ret = hclge_mac_init(hdev);
12103         if (ret) {
12104                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12105                 return ret;
12106         }
12107
12108         ret = hclge_tp_port_init(hdev);
12109         if (ret) {
12110                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12111                         ret);
12112                 return ret;
12113         }
12114
12115         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12116         if (ret) {
12117                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12118                 return ret;
12119         }
12120
12121         ret = hclge_config_gro(hdev);
12122         if (ret)
12123                 return ret;
12124
12125         ret = hclge_init_vlan_config(hdev);
12126         if (ret) {
12127                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12128                 return ret;
12129         }
12130
12131         ret = hclge_tm_init_hw(hdev, true);
12132         if (ret) {
12133                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12134                 return ret;
12135         }
12136
12137         ret = hclge_rss_init_hw(hdev);
12138         if (ret) {
12139                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12140                 return ret;
12141         }
12142
12143         ret = init_mgr_tbl(hdev);
12144         if (ret) {
12145                 dev_err(&pdev->dev,
12146                         "failed to reinit manager table, ret = %d\n", ret);
12147                 return ret;
12148         }
12149
12150         ret = hclge_init_fd_config(hdev);
12151         if (ret) {
12152                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12153                 return ret;
12154         }
12155
12156         ret = hclge_ptp_init(hdev);
12157         if (ret)
12158                 return ret;
12159
12160         /* Log and clear the hw errors that have already occurred */
12161         if (hnae3_dev_ras_imp_supported(hdev))
12162                 hclge_handle_occurred_error(hdev);
12163         else
12164                 hclge_handle_all_hns_hw_errors(ae_dev);
12165
12166         /* Re-enable the hw error interrupts because
12167          * the interrupts get disabled on global reset.
12168          */
12169         ret = hclge_config_nic_hw_error(hdev, true);
12170         if (ret) {
12171                 dev_err(&pdev->dev,
12172                         "fail(%d) to re-enable NIC hw error interrupts\n",
12173                         ret);
12174                 return ret;
12175         }
12176
12177         if (hdev->roce_client) {
12178                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12179                 if (ret) {
12180                         dev_err(&pdev->dev,
12181                                 "fail(%d) to re-enable roce ras interrupts\n",
12182                                 ret);
12183                         return ret;
12184                 }
12185         }
12186
12187         hclge_reset_vport_state(hdev);
12188         ret = hclge_reset_vport_spoofchk(hdev);
12189         if (ret)
12190                 return ret;
12191
12192         ret = hclge_resume_vf_rate(hdev);
12193         if (ret)
12194                 return ret;
12195
12196         hclge_init_rxd_adv_layout(hdev);
12197
12198         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12199                  HCLGE_DRIVER_NAME);
12200
12201         return 0;
12202 }
12203
12204 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12205 {
12206         struct hclge_dev *hdev = ae_dev->priv;
12207         struct hclge_mac *mac = &hdev->hw.mac;
12208
12209         hclge_reset_vf_rate(hdev);
12210         hclge_clear_vf_vlan(hdev);
12211         hclge_misc_affinity_teardown(hdev);
12212         hclge_state_uninit(hdev);
12213         hclge_ptp_uninit(hdev);
12214         hclge_uninit_rxd_adv_layout(hdev);
12215         hclge_uninit_mac_table(hdev);
12216         hclge_del_all_fd_entries(hdev);
12217
12218         if (mac->phydev)
12219                 mdiobus_unregister(mac->mdio_bus);
12220
12221         /* Disable MISC vector(vector0) */
12222         hclge_enable_vector(&hdev->misc_vector, false);
12223         synchronize_irq(hdev->misc_vector.vector_irq);
12224
12225         /* Disable all hw interrupts */
12226         hclge_config_mac_tnl_int(hdev, false);
12227         hclge_config_nic_hw_error(hdev, false);
12228         hclge_config_rocee_ras_interrupt(hdev, false);
12229
12230         hclge_cmd_uninit(hdev);
12231         hclge_misc_irq_uninit(hdev);
12232         hclge_devlink_uninit(hdev);
12233         hclge_pci_uninit(hdev);
12234         mutex_destroy(&hdev->vport_lock);
12235         hclge_uninit_vport_vlan_table(hdev);
12236         ae_dev->priv = NULL;
12237 }
12238
12239 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12240 {
12241         struct hclge_vport *vport = hclge_get_vport(handle);
12242         struct hclge_dev *hdev = vport->back;
12243
12244         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12245 }
12246
12247 static void hclge_get_channels(struct hnae3_handle *handle,
12248                                struct ethtool_channels *ch)
12249 {
12250         ch->max_combined = hclge_get_max_channels(handle);
12251         ch->other_count = 1;
12252         ch->max_other = 1;
12253         ch->combined_count = handle->kinfo.rss_size;
12254 }
12255
12256 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12257                                         u16 *alloc_tqps, u16 *max_rss_size)
12258 {
12259         struct hclge_vport *vport = hclge_get_vport(handle);
12260         struct hclge_dev *hdev = vport->back;
12261
12262         *alloc_tqps = vport->alloc_tqps;
12263         *max_rss_size = hdev->pf_rss_size_max;
12264 }
12265
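/* Change the number of enabled queue pairs: update the vport's TC mapping,
 * reprogram the RSS TC mode for the new rss_size and, unless the user has
 * configured the RSS indirection table, rebuild that table for the new size.
 */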
12266 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12267                               bool rxfh_configured)
12268 {
12269         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12270         struct hclge_vport *vport = hclge_get_vport(handle);
12271         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12272         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12273         struct hclge_dev *hdev = vport->back;
12274         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12275         u16 cur_rss_size = kinfo->rss_size;
12276         u16 cur_tqps = kinfo->num_tqps;
12277         u16 tc_valid[HCLGE_MAX_TC_NUM];
12278         u16 roundup_size;
12279         u32 *rss_indir;
12280         unsigned int i;
12281         int ret;
12282
12283         kinfo->req_rss_size = new_tqps_num;
12284
12285         ret = hclge_tm_vport_map_update(hdev);
12286         if (ret) {
12287                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12288                 return ret;
12289         }
12290
12291         roundup_size = roundup_pow_of_two(kinfo->rss_size);
12292         roundup_size = ilog2(roundup_size);
12293         /* Set the RSS TC mode according to the new RSS size */
12294         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12295                 tc_valid[i] = 0;
12296
12297                 if (!(hdev->hw_tc_map & BIT(i)))
12298                         continue;
12299
12300                 tc_valid[i] = 1;
12301                 tc_size[i] = roundup_size;
12302                 tc_offset[i] = kinfo->rss_size * i;
12303         }
12304         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12305         if (ret)
12306                 return ret;
12307
12308         /* RSS indirection table has been configured by user */
12309         if (rxfh_configured)
12310                 goto out;
12311
12312         /* Reinitialize the RSS indirection table for the new RSS size */
12313         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12314                             GFP_KERNEL);
12315         if (!rss_indir)
12316                 return -ENOMEM;
12317
12318         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12319                 rss_indir[i] = i % kinfo->rss_size;
12320
12321         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12322         if (ret)
12323                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12324                         ret);
12325
12326         kfree(rss_indir);
12327
12328 out:
12329         if (!ret)
12330                 dev_info(&hdev->pdev->dev,
12331                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12332                          cur_rss_size, kinfo->rss_size,
12333                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12334
12335         return ret;
12336 }
12337
12338 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12339                               u32 *regs_num_64_bit)
12340 {
12341         struct hclge_desc desc;
12342         u32 total_num;
12343         int ret;
12344
12345         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12346         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12347         if (ret) {
12348                 dev_err(&hdev->pdev->dev,
12349                         "Query register number cmd failed, ret = %d.\n", ret);
12350                 return ret;
12351         }
12352
12353         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12354         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12355
12356         total_num = *regs_num_32_bit + *regs_num_64_bit;
12357         if (!total_num)
12358                 return -EINVAL;
12359
12360         return 0;
12361 }
12362
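/* Dump the 32 bit registers via the firmware command queue. The first
 * descriptor carries two header words, so it holds fewer register values
 * than the descriptors that follow.
 */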
12363 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12364                                  void *data)
12365 {
12366 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12367 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12368
12369         struct hclge_desc *desc;
12370         u32 *reg_val = data;
12371         __le32 *desc_data;
12372         int nodata_num;
12373         int cmd_num;
12374         int i, k, n;
12375         int ret;
12376
12377         if (regs_num == 0)
12378                 return 0;
12379
12380         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12381         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12382                                HCLGE_32_BIT_REG_RTN_DATANUM);
12383         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12384         if (!desc)
12385                 return -ENOMEM;
12386
12387         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12388         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12389         if (ret) {
12390                 dev_err(&hdev->pdev->dev,
12391                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
12392                 kfree(desc);
12393                 return ret;
12394         }
12395
12396         for (i = 0; i < cmd_num; i++) {
12397                 if (i == 0) {
12398                         desc_data = (__le32 *)(&desc[i].data[0]);
12399                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12400                 } else {
12401                         desc_data = (__le32 *)(&desc[i]);
12402                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
12403                 }
12404                 for (k = 0; k < n; k++) {
12405                         *reg_val++ = le32_to_cpu(*desc_data++);
12406
12407                         regs_num--;
12408                         if (!regs_num)
12409                                 break;
12410                 }
12411         }
12412
12413         kfree(desc);
12414         return 0;
12415 }
12416
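/* Same as the 32 bit dump, but each descriptor carries 64 bit values and
 * only the first word of the first descriptor is header data.
 */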
12417 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12418                                  void *data)
12419 {
12420 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12421 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12422
12423         struct hclge_desc *desc;
12424         u64 *reg_val = data;
12425         __le64 *desc_data;
12426         int nodata_len;
12427         int cmd_num;
12428         int i, k, n;
12429         int ret;
12430
12431         if (regs_num == 0)
12432                 return 0;
12433
12434         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12435         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12436                                HCLGE_64_BIT_REG_RTN_DATANUM);
12437         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12438         if (!desc)
12439                 return -ENOMEM;
12440
12441         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12442         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12443         if (ret) {
12444                 dev_err(&hdev->pdev->dev,
12445                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
12446                 kfree(desc);
12447                 return ret;
12448         }
12449
12450         for (i = 0; i < cmd_num; i++) {
12451                 if (i == 0) {
12452                         desc_data = (__le64 *)(&desc[i].data[0]);
12453                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12454                 } else {
12455                         desc_data = (__le64 *)(&desc[i]);
12456                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
12457                 }
12458                 for (k = 0; k < n; k++) {
12459                         *reg_val++ = le64_to_cpu(*desc_data++);
12460
12461                         regs_num--;
12462                         if (!regs_num)
12463                                 break;
12464                 }
12465         }
12466
12467         kfree(desc);
12468         return 0;
12469 }
12470
12471 #define MAX_SEPARATE_NUM        4
12472 #define SEPARATOR_VALUE         0xFDFCFBFA
12473 #define REG_NUM_PER_LINE        4
12474 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
12475 #define REG_SEPARATOR_LINE      1
12476 #define REG_NUM_REMAIN_MASK     3
12477
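/* query how many command BDs are needed for each DFX register type; all
 * HCLGE_GET_DFX_REG_TYPE_CNT descriptors are chained into a single command.
 */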
12478 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12479 {
12480         int i;
12481
12482         /* initialize all command BDs except the last one */
12483         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12484                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12485                                            true);
12486                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12487         }
12488
12489         /* initialize the last command BD */
12490         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12491
12492         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12493 }
12494
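/* fill bd_num_list with the BD number of each DFX register type, using the
 * per-type offsets from hclge_dfx_bd_offset_list to index the query result.
 */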
12495 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12496                                     int *bd_num_list,
12497                                     u32 type_num)
12498 {
12499         u32 entries_per_desc, desc_index, index, offset, i;
12500         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12501         int ret;
12502
12503         ret = hclge_query_bd_num_cmd_send(hdev, desc);
12504         if (ret) {
12505                 dev_err(&hdev->pdev->dev,
12506                         "Get dfx bd num fail, status is %d.\n", ret);
12507                 return ret;
12508         }
12509
12510         entries_per_desc = ARRAY_SIZE(desc[0].data);
12511         for (i = 0; i < type_num; i++) {
12512                 offset = hclge_dfx_bd_offset_list[i];
12513                 index = offset % entries_per_desc;
12514                 desc_index = offset / entries_per_desc;
12515                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12516         }
12517
12518         return ret;
12519 }
12520
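/* send a chained read command of bd_num descriptors for one DFX register
 * type; the register data is returned in desc_src.
 */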
12521 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12522                                   struct hclge_desc *desc_src, int bd_num,
12523                                   enum hclge_opcode_type cmd)
12524 {
12525         struct hclge_desc *desc = desc_src;
12526         int i, ret;
12527
12528         hclge_cmd_setup_basic_desc(desc, cmd, true);
12529         for (i = 0; i < bd_num - 1; i++) {
12530                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12531                 desc++;
12532                 hclge_cmd_setup_basic_desc(desc, cmd, true);
12533         }
12534
12535         desc = desc_src;
12536         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12537         if (ret)
12538                 dev_err(&hdev->pdev->dev,
12539                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12540                         cmd, ret);
12541
12542         return ret;
12543 }
12544
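/* copy the register values from the command descriptors into the output
 * buffer and pad the last line with separator values; returns the number of
 * u32 words written.
 */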
12545 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12546                                     void *data)
12547 {
12548         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12549         struct hclge_desc *desc = desc_src;
12550         u32 *reg = data;
12551
12552         entries_per_desc = ARRAY_SIZE(desc->data);
12553         reg_num = entries_per_desc * bd_num;
12554         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12555         for (i = 0; i < reg_num; i++) {
12556                 index = i % entries_per_desc;
12557                 desc_index = i / entries_per_desc;
12558                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12559         }
12560         for (i = 0; i < separator_num; i++)
12561                 *reg++ = SEPARATOR_VALUE;
12562
12563         return reg_num + separator_num;
12564 }
12565
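/* calculate the buffer length (in bytes) needed to dump all DFX registers,
 * including one separator line per register type.
 */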
12566 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12567 {
12568         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12569         int data_len_per_desc, bd_num, i;
12570         int *bd_num_list;
12571         u32 data_len;
12572         int ret;
12573
12574         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12575         if (!bd_num_list)
12576                 return -ENOMEM;
12577
12578         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12579         if (ret) {
12580                 dev_err(&hdev->pdev->dev,
12581                         "Get dfx reg bd num fail, status is %d.\n", ret);
12582                 goto out;
12583         }
12584
12585         data_len_per_desc = sizeof_field(struct hclge_desc, data);
12586         *len = 0;
12587         for (i = 0; i < dfx_reg_type_num; i++) {
12588                 bd_num = bd_num_list[i];
12589                 data_len = data_len_per_desc * bd_num;
12590                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12591         }
12592
12593 out:
12594         kfree(bd_num_list);
12595         return ret;
12596 }
12597
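/* dump all DFX registers: query the BD number of each register type, then
 * read each type with a chained command and append its data to the buffer.
 */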
12598 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12599 {
12600         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12601         int bd_num, bd_num_max, buf_len, i;
12602         struct hclge_desc *desc_src;
12603         int *bd_num_list;
12604         u32 *reg = data;
12605         int ret;
12606
12607         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12608         if (!bd_num_list)
12609                 return -ENOMEM;
12610
12611         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12612         if (ret) {
12613                 dev_err(&hdev->pdev->dev,
12614                         "Get dfx reg bd num fail, status is %d.\n", ret);
12615                 goto out;
12616         }
12617
12618         bd_num_max = bd_num_list[0];
12619         for (i = 1; i < dfx_reg_type_num; i++)
12620                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12621
12622         buf_len = sizeof(*desc_src) * bd_num_max;
12623         desc_src = kzalloc(buf_len, GFP_KERNEL);
12624         if (!desc_src) {
12625                 ret = -ENOMEM;
12626                 goto out;
12627         }
12628
12629         for (i = 0; i < dfx_reg_type_num; i++) {
12630                 bd_num = bd_num_list[i];
12631                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12632                                              hclge_dfx_reg_opcode_list[i]);
12633                 if (ret) {
12634                         dev_err(&hdev->pdev->dev,
12635                                 "Get dfx reg fail, status is %d.\n", ret);
12636                         break;
12637                 }
12638
12639                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12640         }
12641
12642         kfree(desc_src);
12643 out:
12644         kfree(bd_num_list);
12645         return ret;
12646 }
12647
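/* read the directly accessible PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) from PCIe register space, padding each
 * group with separator values; returns the number of u32 words written.
 */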
12648 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12649                               struct hnae3_knic_private_info *kinfo)
12650 {
12651 #define HCLGE_RING_REG_OFFSET           0x200
12652 #define HCLGE_RING_INT_REG_OFFSET       0x4
12653
12654         int i, j, reg_num, separator_num;
12655         int data_num_sum;
12656         u32 *reg = data;
12657
12658         /* fetch per-PF register values from the PF PCIe register space */
12659         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12660         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12661         for (i = 0; i < reg_num; i++)
12662                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12663         for (i = 0; i < separator_num; i++)
12664                 *reg++ = SEPARATOR_VALUE;
12665         data_num_sum = reg_num + separator_num;
12666
12667         reg_num = ARRAY_SIZE(common_reg_addr_list);
12668         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12669         for (i = 0; i < reg_num; i++)
12670                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12671         for (i = 0; i < separator_num; i++)
12672                 *reg++ = SEPARATOR_VALUE;
12673         data_num_sum += reg_num + separator_num;
12674
12675         reg_num = ARRAY_SIZE(ring_reg_addr_list);
12676         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12677         for (j = 0; j < kinfo->num_tqps; j++) {
12678                 for (i = 0; i < reg_num; i++)
12679                         *reg++ = hclge_read_dev(&hdev->hw,
12680                                                 ring_reg_addr_list[i] +
12681                                                 HCLGE_RING_REG_OFFSET * j);
12682                 for (i = 0; i < separator_num; i++)
12683                         *reg++ = SEPARATOR_VALUE;
12684         }
12685         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12686
12687         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12688         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12689         for (j = 0; j < hdev->num_msi_used - 1; j++) {
12690                 for (i = 0; i < reg_num; i++)
12691                         *reg++ = hclge_read_dev(&hdev->hw,
12692                                                 tqp_intr_reg_addr_list[i] +
12693                                                 HCLGE_RING_INT_REG_OFFSET * j);
12694                 for (i = 0; i < separator_num; i++)
12695                         *reg++ = SEPARATOR_VALUE;
12696         }
12697         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12698
12699         return data_num_sum;
12700 }
12701
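/* return the total length (in bytes) of the register dump: cmdq, common,
 * ring, interrupt, 32-bit, 64-bit and DFX registers.
 */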
12702 static int hclge_get_regs_len(struct hnae3_handle *handle)
12703 {
12704         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12705         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12706         struct hclge_vport *vport = hclge_get_vport(handle);
12707         struct hclge_dev *hdev = vport->back;
12708         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12709         int regs_lines_32_bit, regs_lines_64_bit;
12710         int ret;
12711
12712         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12713         if (ret) {
12714                 dev_err(&hdev->pdev->dev,
12715                         "Get register number failed, ret = %d.\n", ret);
12716                 return ret;
12717         }
12718
12719         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12720         if (ret) {
12721                 dev_err(&hdev->pdev->dev,
12722                         "Get dfx reg len failed, ret = %d.\n", ret);
12723                 return ret;
12724         }
12725
12726         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12727                 REG_SEPARATOR_LINE;
12728         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12729                 REG_SEPARATOR_LINE;
12730         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12731                 REG_SEPARATOR_LINE;
12732         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12733                 REG_SEPARATOR_LINE;
12734         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12735                 REG_SEPARATOR_LINE;
12736         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12737                 REG_SEPARATOR_LINE;
12738
12739         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12740                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12741                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12742 }
12743
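/* fill the register dump buffer: PF PCIe registers first, then the 32-bit
 * and 64-bit firmware-queried registers, and finally the DFX registers.
 */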
12744 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12745                            void *data)
12746 {
12747         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12748         struct hclge_vport *vport = hclge_get_vport(handle);
12749         struct hclge_dev *hdev = vport->back;
12750         u32 regs_num_32_bit, regs_num_64_bit;
12751         int i, reg_num, separator_num, ret;
12752         u32 *reg = data;
12753
12754         *version = hdev->fw_version;
12755
12756         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12757         if (ret) {
12758                 dev_err(&hdev->pdev->dev,
12759                         "Get register number failed, ret = %d.\n", ret);
12760                 return;
12761         }
12762
12763         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12764
12765         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12766         if (ret) {
12767                 dev_err(&hdev->pdev->dev,
12768                         "Get 32 bit register failed, ret = %d.\n", ret);
12769                 return;
12770         }
12771         reg_num = regs_num_32_bit;
12772         reg += reg_num;
12773         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12774         for (i = 0; i < separator_num; i++)
12775                 *reg++ = SEPARATOR_VALUE;
12776
12777         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12778         if (ret) {
12779                 dev_err(&hdev->pdev->dev,
12780                         "Get 64 bit register failed, ret = %d.\n", ret);
12781                 return;
12782         }
12783         reg_num = regs_num_64_bit * 2;
12784         reg += reg_num;
12785         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12786         for (i = 0; i < separator_num; i++)
12787                 *reg++ = SEPARATOR_VALUE;
12788
12789         ret = hclge_get_dfx_reg(hdev, reg);
12790         if (ret)
12791                 dev_err(&hdev->pdev->dev,
12792                         "Get dfx register failed, ret = %d.\n", ret);
12793 }
12794
12795 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12796 {
12797         struct hclge_set_led_state_cmd *req;
12798         struct hclge_desc desc;
12799         int ret;
12800
12801         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12802
12803         req = (struct hclge_set_led_state_cmd *)desc.data;
12804         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12805                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12806
12807         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12808         if (ret)
12809                 dev_err(&hdev->pdev->dev,
12810                         "Send set led state cmd error, ret =%d\n", ret);
12811
12812         return ret;
12813 }
12814
12815 enum hclge_led_status {
12816         HCLGE_LED_OFF,
12817         HCLGE_LED_ON,
12818         HCLGE_LED_NO_CHANGE = 0xFF,
12819 };
12820
12821 static int hclge_set_led_id(struct hnae3_handle *handle,
12822                             enum ethtool_phys_id_state status)
12823 {
12824         struct hclge_vport *vport = hclge_get_vport(handle);
12825         struct hclge_dev *hdev = vport->back;
12826
12827         switch (status) {
12828         case ETHTOOL_ID_ACTIVE:
12829                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12830         case ETHTOOL_ID_INACTIVE:
12831                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12832         default:
12833                 return -EINVAL;
12834         }
12835 }
12836
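/* copy the cached supported and advertising link mode masks to the caller */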
12837 static void hclge_get_link_mode(struct hnae3_handle *handle,
12838                                 unsigned long *supported,
12839                                 unsigned long *advertising)
12840 {
12841         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12842         struct hclge_vport *vport = hclge_get_vport(handle);
12843         struct hclge_dev *hdev = vport->back;
12844         unsigned int idx = 0;
12845
12846         for (; idx < size; idx++) {
12847                 supported[idx] = hdev->hw.mac.supported[idx];
12848                 advertising[idx] = hdev->hw.mac.advertising[idx];
12849         }
12850 }
12851
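/* enable or disable hardware GRO, restoring the cached flag on failure */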
12852 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12853 {
12854         struct hclge_vport *vport = hclge_get_vport(handle);
12855         struct hclge_dev *hdev = vport->back;
12856         bool gro_en_old = hdev->gro_en;
12857         int ret;
12858
12859         hdev->gro_en = enable;
12860         ret = hclge_config_gro(hdev);
12861         if (ret)
12862                 hdev->gro_en = gro_en_old;
12863
12864         return ret;
12865 }
12866
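/* re-apply the promiscuous mode configuration for the PF and for each VF
 * whose requested mode has changed, taking the VF trust setting and the
 * overflow promiscuous flags into account.
 */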
12867 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12868 {
12869         struct hclge_vport *vport = &hdev->vport[0];
12870         struct hnae3_handle *handle = &vport->nic;
12871         u8 tmp_flags;
12872         int ret;
12873         u16 i;
12874
12875         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12876                 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12877                 vport->last_promisc_flags = vport->overflow_promisc_flags;
12878         }
12879
12880         if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12881                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12882                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12883                                              tmp_flags & HNAE3_MPE);
12884                 if (!ret) {
12885                         clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12886                                   &vport->state);
12887                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12888                                 &vport->state);
12889                 }
12890         }
12891
12892         for (i = 1; i < hdev->num_alloc_vport; i++) {
12893                 bool uc_en = false;
12894                 bool mc_en = false;
12895                 bool bc_en;
12896
12897                 vport = &hdev->vport[i];
12898
12899                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12900                                         &vport->state))
12901                         continue;
12902
12903                 if (vport->vf_info.trusted) {
12904                         uc_en = vport->vf_info.request_uc_en > 0 ||
12905                                 vport->overflow_promisc_flags &
12906                                 HNAE3_OVERFLOW_UPE;
12907                         mc_en = vport->vf_info.request_mc_en > 0 ||
12908                                 vport->overflow_promisc_flags &
12909                                 HNAE3_OVERFLOW_MPE;
12910                 }
12911                 bc_en = vport->vf_info.request_bc_en > 0;
12912
12913                 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12914                                                  mc_en, bc_en);
12915                 if (ret) {
12916                         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12917                                 &vport->state);
12918                         return;
12919                 }
12920                 hclge_set_vport_vlan_fltr_change(vport);
12921         }
12922 }
12923
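/* query the firmware for whether an SFP module is currently present */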
12924 static bool hclge_module_existed(struct hclge_dev *hdev)
12925 {
12926         struct hclge_desc desc;
12927         u32 existed;
12928         int ret;
12929
12930         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12931         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12932         if (ret) {
12933                 dev_err(&hdev->pdev->dev,
12934                         "failed to get SFP exist state, ret = %d\n", ret);
12935                 return false;
12936         }
12937
12938         existed = le32_to_cpu(desc.data[0]);
12939
12940         return existed != 0;
12941 }
12942
12943 /* need 6 BDs (140 bytes in total) in one reading,
12944  * return the number of bytes actually read; 0 means the read failed.
12945  */
12946 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12947                                      u32 len, u8 *data)
12948 {
12949         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12950         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12951         u16 read_len;
12952         u16 copy_len;
12953         int ret;
12954         int i;
12955
12956         /* set up all 6 BDs to read module eeprom info. */
12957         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12958                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12959                                            true);
12960
12961                 /* bd0~bd4 need next flag */
12962                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12963                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12964         }
12965
12966         /* set up BD0, which contains the read offset and length. */
12967         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12968         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12969         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12970         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12971
12972         ret = hclge_cmd_send(&hdev->hw, desc, i);
12973         if (ret) {
12974                 dev_err(&hdev->pdev->dev,
12975                         "failed to get SFP eeprom info, ret = %d\n", ret);
12976                 return 0;
12977         }
12978
12979         /* copy sfp info from bd0 to out buffer. */
12980         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12981         memcpy(data, sfp_info_bd0->data, copy_len);
12982         read_len = copy_len;
12983
12984         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12985         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12986                 if (read_len >= len)
12987                         return read_len;
12988
12989                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12990                 memcpy(data + read_len, desc[i].data, copy_len);
12991                 read_len += copy_len;
12992         }
12993
12994         return read_len;
12995 }
12996
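/* read len bytes of SFP module eeprom starting at offset, in chunks of at
 * most HCLGE_SFP_INFO_MAX_LEN bytes per firmware command; only supported on
 * fiber ports.
 */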
12997 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12998                                    u32 len, u8 *data)
12999 {
13000         struct hclge_vport *vport = hclge_get_vport(handle);
13001         struct hclge_dev *hdev = vport->back;
13002         u32 read_len = 0;
13003         u16 data_len;
13004
13005         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
13006                 return -EOPNOTSUPP;
13007
13008         if (!hclge_module_existed(hdev))
13009                 return -ENXIO;
13010
13011         while (read_len < len) {
13012                 data_len = hclge_get_sfp_eeprom_info(hdev,
13013                                                      offset + read_len,
13014                                                      len - read_len,
13015                                                      data + read_len);
13016                 if (!data_len)
13017                         return -EIO;
13018
13019                 read_len += data_len;
13020         }
13021
13022         return 0;
13023 }
13024
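/* query the link diagnosis status code from firmware; only supported on
 * device versions newer than V2.
 */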
13025 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
13026                                          u32 *status_code)
13027 {
13028         struct hclge_vport *vport = hclge_get_vport(handle);
13029         struct hclge_dev *hdev = vport->back;
13030         struct hclge_desc desc;
13031         int ret;
13032
13033         if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
13034                 return -EOPNOTSUPP;
13035
13036         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
13037         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
13038         if (ret) {
13039                 dev_err(&hdev->pdev->dev,
13040                         "failed to query link diagnosis info, ret = %d\n", ret);
13041                 return ret;
13042         }
13043
13044         *status_code = le32_to_cpu(desc.data[0]);
13045         return 0;
13046 }
13047
13048 static const struct hnae3_ae_ops hclge_ops = {
13049         .init_ae_dev = hclge_init_ae_dev,
13050         .uninit_ae_dev = hclge_uninit_ae_dev,
13051         .reset_prepare = hclge_reset_prepare_general,
13052         .reset_done = hclge_reset_done,
13053         .init_client_instance = hclge_init_client_instance,
13054         .uninit_client_instance = hclge_uninit_client_instance,
13055         .map_ring_to_vector = hclge_map_ring_to_vector,
13056         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
13057         .get_vector = hclge_get_vector,
13058         .put_vector = hclge_put_vector,
13059         .set_promisc_mode = hclge_set_promisc_mode,
13060         .request_update_promisc_mode = hclge_request_update_promisc_mode,
13061         .set_loopback = hclge_set_loopback,
13062         .start = hclge_ae_start,
13063         .stop = hclge_ae_stop,
13064         .client_start = hclge_client_start,
13065         .client_stop = hclge_client_stop,
13066         .get_status = hclge_get_status,
13067         .get_ksettings_an_result = hclge_get_ksettings_an_result,
13068         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
13069         .get_media_type = hclge_get_media_type,
13070         .check_port_speed = hclge_check_port_speed,
13071         .get_fec = hclge_get_fec,
13072         .set_fec = hclge_set_fec,
13073         .get_rss_key_size = hclge_get_rss_key_size,
13074         .get_rss = hclge_get_rss,
13075         .set_rss = hclge_set_rss,
13076         .set_rss_tuple = hclge_set_rss_tuple,
13077         .get_rss_tuple = hclge_get_rss_tuple,
13078         .get_tc_size = hclge_get_tc_size,
13079         .get_mac_addr = hclge_get_mac_addr,
13080         .set_mac_addr = hclge_set_mac_addr,
13081         .do_ioctl = hclge_do_ioctl,
13082         .add_uc_addr = hclge_add_uc_addr,
13083         .rm_uc_addr = hclge_rm_uc_addr,
13084         .add_mc_addr = hclge_add_mc_addr,
13085         .rm_mc_addr = hclge_rm_mc_addr,
13086         .set_autoneg = hclge_set_autoneg,
13087         .get_autoneg = hclge_get_autoneg,
13088         .restart_autoneg = hclge_restart_autoneg,
13089         .halt_autoneg = hclge_halt_autoneg,
13090         .get_pauseparam = hclge_get_pauseparam,
13091         .set_pauseparam = hclge_set_pauseparam,
13092         .set_mtu = hclge_set_mtu,
13093         .reset_queue = hclge_reset_tqp,
13094         .get_stats = hclge_get_stats,
13095         .get_mac_stats = hclge_get_mac_stat,
13096         .update_stats = hclge_update_stats,
13097         .get_strings = hclge_get_strings,
13098         .get_sset_count = hclge_get_sset_count,
13099         .get_fw_version = hclge_get_fw_version,
13100         .get_mdix_mode = hclge_get_mdix_mode,
13101         .enable_vlan_filter = hclge_enable_vlan_filter,
13102         .set_vlan_filter = hclge_set_vlan_filter,
13103         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
13104         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
13105         .reset_event = hclge_reset_event,
13106         .get_reset_level = hclge_get_reset_level,
13107         .set_default_reset_request = hclge_set_def_reset_request,
13108         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13109         .set_channels = hclge_set_channels,
13110         .get_channels = hclge_get_channels,
13111         .get_regs_len = hclge_get_regs_len,
13112         .get_regs = hclge_get_regs,
13113         .set_led_id = hclge_set_led_id,
13114         .get_link_mode = hclge_get_link_mode,
13115         .add_fd_entry = hclge_add_fd_entry,
13116         .del_fd_entry = hclge_del_fd_entry,
13117         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13118         .get_fd_rule_info = hclge_get_fd_rule_info,
13119         .get_fd_all_rules = hclge_get_all_rules,
13120         .enable_fd = hclge_enable_fd,
13121         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
13122         .dbg_read_cmd = hclge_dbg_read_cmd,
13123         .handle_hw_ras_error = hclge_handle_hw_ras_error,
13124         .get_hw_reset_stat = hclge_get_hw_reset_stat,
13125         .ae_dev_resetting = hclge_ae_dev_resetting,
13126         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13127         .set_gro_en = hclge_gro_en,
13128         .get_global_queue_id = hclge_covert_handle_qid_global,
13129         .set_timer_task = hclge_set_timer_task,
13130         .mac_connect_phy = hclge_mac_connect_phy,
13131         .mac_disconnect_phy = hclge_mac_disconnect_phy,
13132         .get_vf_config = hclge_get_vf_config,
13133         .set_vf_link_state = hclge_set_vf_link_state,
13134         .set_vf_spoofchk = hclge_set_vf_spoofchk,
13135         .set_vf_trust = hclge_set_vf_trust,
13136         .set_vf_rate = hclge_set_vf_rate,
13137         .set_vf_mac = hclge_set_vf_mac,
13138         .get_module_eeprom = hclge_get_module_eeprom,
13139         .get_cmdq_stat = hclge_get_cmdq_stat,
13140         .add_cls_flower = hclge_add_cls_flower,
13141         .del_cls_flower = hclge_del_cls_flower,
13142         .cls_flower_active = hclge_is_cls_flower_active,
13143         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13144         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13145         .set_tx_hwts_info = hclge_ptp_set_tx_info,
13146         .get_rx_hwts = hclge_ptp_get_rx_hwts,
13147         .get_ts_info = hclge_ptp_get_ts_info,
13148         .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13149 };
13150
13151 static struct hnae3_ae_algo ae_algo = {
13152         .ops = &hclge_ops,
13153         .pdev_id_table = ae_algo_pci_tbl,
13154 };
13155
13156 static int hclge_init(void)
13157 {
13158         pr_info("%s is initializing\n", HCLGE_NAME);
13159
13160         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13161         if (!hclge_wq) {
13162                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13163                 return -ENOMEM;
13164         }
13165
13166         hnae3_register_ae_algo(&ae_algo);
13167
13168         return 0;
13169 }
13170
13171 static void hclge_exit(void)
13172 {
13173         hnae3_unregister_ae_algo_prepare(&ae_algo);
13174         hnae3_unregister_ae_algo(&ae_algo);
13175         destroy_workqueue(hclge_wq);
13176 }
13177 module_init(hclge_init);
13178 module_exit(hclge_exit);
13179
13180 MODULE_LICENSE("GPL");
13181 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13182 MODULE_DESCRIPTION("HCLGE Driver");
13183 MODULE_VERSION(HCLGE_MOD_VERSION);