drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
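/* Illustrative use of the two macros above (hypothetical local variable):
 *   u64 tx_pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *                          HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */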
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
75 static struct workqueue_struct *hclge_wq;
76
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85         /* required last entry */
86         {0, }
87 };
88
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
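/* Address tables of CMDQ, misc/common, per-ring and per-TQP interrupt
 * registers; presumably iterated elsewhere in this driver when dumping
 * hardware registers for debugging.
 */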
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92                                          HCLGE_CMDQ_TX_ADDR_H_REG,
93                                          HCLGE_CMDQ_TX_DEPTH_REG,
94                                          HCLGE_CMDQ_TX_TAIL_REG,
95                                          HCLGE_CMDQ_TX_HEAD_REG,
96                                          HCLGE_CMDQ_RX_ADDR_L_REG,
97                                          HCLGE_CMDQ_RX_ADDR_H_REG,
98                                          HCLGE_CMDQ_RX_DEPTH_REG,
99                                          HCLGE_CMDQ_RX_TAIL_REG,
100                                          HCLGE_CMDQ_RX_HEAD_REG,
101                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
102                                          HCLGE_CMDQ_INTR_STS_REG,
103                                          HCLGE_CMDQ_INTR_EN_REG,
104                                          HCLGE_CMDQ_INTR_GEN_REG};
105
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107                                            HCLGE_VECTOR0_OTER_EN_REG,
108                                            HCLGE_MISC_RESET_STS_REG,
109                                            HCLGE_MISC_VECTOR_INT_STS,
110                                            HCLGE_GLOBAL_RESET_REG,
111                                            HCLGE_FUN_RST_ING,
112                                            HCLGE_GRO_EN_REG};
113
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115                                          HCLGE_RING_RX_ADDR_H_REG,
116                                          HCLGE_RING_RX_BD_NUM_REG,
117                                          HCLGE_RING_RX_BD_LENGTH_REG,
118                                          HCLGE_RING_RX_MERGE_EN_REG,
119                                          HCLGE_RING_RX_TAIL_REG,
120                                          HCLGE_RING_RX_HEAD_REG,
121                                          HCLGE_RING_RX_FBD_NUM_REG,
122                                          HCLGE_RING_RX_OFFSET_REG,
123                                          HCLGE_RING_RX_FBD_OFFSET_REG,
124                                          HCLGE_RING_RX_STASH_REG,
125                                          HCLGE_RING_RX_BD_ERR_REG,
126                                          HCLGE_RING_TX_ADDR_L_REG,
127                                          HCLGE_RING_TX_ADDR_H_REG,
128                                          HCLGE_RING_TX_BD_NUM_REG,
129                                          HCLGE_RING_TX_PRIORITY_REG,
130                                          HCLGE_RING_TX_TC_REG,
131                                          HCLGE_RING_TX_MERGE_EN_REG,
132                                          HCLGE_RING_TX_TAIL_REG,
133                                          HCLGE_RING_TX_HEAD_REG,
134                                          HCLGE_RING_TX_FBD_NUM_REG,
135                                          HCLGE_RING_TX_OFFSET_REG,
136                                          HCLGE_RING_TX_EBD_NUM_REG,
137                                          HCLGE_RING_TX_EBD_OFFSET_REG,
138                                          HCLGE_RING_TX_BD_ERR_REG,
139                                          HCLGE_RING_EN_REG};
140
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142                                              HCLGE_TQP_INTR_GL0_REG,
143                                              HCLGE_TQP_INTR_GL1_REG,
144                                              HCLGE_TQP_INTR_GL2_REG,
145                                              HCLGE_TQP_INTR_RL_REG};
146
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148         "App    Loopback test",
149         "Serdes serial Loopback test",
150         "Serdes parallel Loopback test",
151         "Phy    Loopback test"
152 };
153
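/* Name/offset table of the MAC counters inside struct hclge_mac_stats;
 * consumed below by hclge_comm_get_strings()/hclge_comm_get_stats(), e.g.
 *   p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
 *                            ARRAY_SIZE(g_mac_stats_string), data);
 */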
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155         {"mac_tx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157         {"mac_rx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159         {"mac_tx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161         {"mac_rx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163         {"mac_tx_pfc_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165         {"mac_tx_pfc_pri0_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167         {"mac_tx_pfc_pri1_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169         {"mac_tx_pfc_pri2_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171         {"mac_tx_pfc_pri3_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173         {"mac_tx_pfc_pri4_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175         {"mac_tx_pfc_pri5_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177         {"mac_tx_pfc_pri6_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179         {"mac_tx_pfc_pri7_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181         {"mac_rx_pfc_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183         {"mac_rx_pfc_pri0_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185         {"mac_rx_pfc_pri1_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187         {"mac_rx_pfc_pri2_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189         {"mac_rx_pfc_pri3_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191         {"mac_rx_pfc_pri4_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193         {"mac_rx_pfc_pri5_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195         {"mac_rx_pfc_pri6_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197         {"mac_rx_pfc_pri7_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199         {"mac_tx_total_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201         {"mac_tx_total_oct_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203         {"mac_tx_good_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205         {"mac_tx_bad_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207         {"mac_tx_good_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209         {"mac_tx_bad_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211         {"mac_tx_uni_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213         {"mac_tx_multi_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215         {"mac_tx_broad_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217         {"mac_tx_undersize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219         {"mac_tx_oversize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221         {"mac_tx_64_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223         {"mac_tx_65_127_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225         {"mac_tx_128_255_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227         {"mac_tx_256_511_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229         {"mac_tx_512_1023_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231         {"mac_tx_1024_1518_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233         {"mac_tx_1519_2047_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235         {"mac_tx_2048_4095_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237         {"mac_tx_4096_8191_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239         {"mac_tx_8192_9216_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241         {"mac_tx_9217_12287_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243         {"mac_tx_12288_16383_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245         {"mac_tx_1519_max_good_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247         {"mac_tx_1519_max_bad_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249         {"mac_rx_total_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251         {"mac_rx_total_oct_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253         {"mac_rx_good_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255         {"mac_rx_bad_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257         {"mac_rx_good_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259         {"mac_rx_bad_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261         {"mac_rx_uni_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263         {"mac_rx_multi_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265         {"mac_rx_broad_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267         {"mac_rx_undersize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269         {"mac_rx_oversize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271         {"mac_rx_64_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273         {"mac_rx_65_127_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275         {"mac_rx_128_255_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277         {"mac_rx_256_511_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279         {"mac_rx_512_1023_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281         {"mac_rx_1024_1518_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283         {"mac_rx_1519_2047_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285         {"mac_rx_2048_4095_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287         {"mac_rx_4096_8191_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289         {"mac_rx_8192_9216_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291         {"mac_rx_9217_12287_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293         {"mac_rx_12288_16383_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295         {"mac_rx_1519_max_good_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297         {"mac_rx_1519_max_bad_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300         {"mac_tx_fragment_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302         {"mac_tx_undermin_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304         {"mac_tx_jabber_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306         {"mac_tx_err_all_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308         {"mac_tx_from_app_good_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310         {"mac_tx_from_app_bad_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312         {"mac_rx_fragment_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314         {"mac_rx_undermin_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316         {"mac_rx_jabber_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318         {"mac_rx_fcs_err_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320         {"mac_rx_send_app_good_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322         {"mac_rx_send_app_bad_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327         {
328                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331                 .i_port_bitmap = 0x1,
332         },
333 };
334
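/* Default 40-byte RSS hash key (the well-known standard Toeplitz key);
 * presumably programmed into the hardware as the initial RSS key.
 */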
335 static const u8 hclge_hash_key[] = {
336         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342
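/* The two arrays below are parallel: hclge_dfx_bd_offset_list gives the
 * HCLGE_DFX_*_BD_OFFSET index holding each block's BD count, and
 * hclge_dfx_reg_opcode_list gives the matching DFX register query opcode.
 */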
343 static const u32 hclge_dfx_bd_offset_list[] = {
344         HCLGE_DFX_BIOS_BD_OFFSET,
345         HCLGE_DFX_SSU_0_BD_OFFSET,
346         HCLGE_DFX_SSU_1_BD_OFFSET,
347         HCLGE_DFX_IGU_BD_OFFSET,
348         HCLGE_DFX_RPU_0_BD_OFFSET,
349         HCLGE_DFX_RPU_1_BD_OFFSET,
350         HCLGE_DFX_NCSI_BD_OFFSET,
351         HCLGE_DFX_RTC_BD_OFFSET,
352         HCLGE_DFX_PPP_BD_OFFSET,
353         HCLGE_DFX_RCB_BD_OFFSET,
354         HCLGE_DFX_TQP_BD_OFFSET,
355         HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359         HCLGE_OPC_DFX_BIOS_COMMON_REG,
360         HCLGE_OPC_DFX_SSU_REG_0,
361         HCLGE_OPC_DFX_SSU_REG_1,
362         HCLGE_OPC_DFX_IGU_EGU_REG,
363         HCLGE_OPC_DFX_RPU_REG_0,
364         HCLGE_OPC_DFX_RPU_REG_1,
365         HCLGE_OPC_DFX_NCSI_REG,
366         HCLGE_OPC_DFX_RTC_REG,
367         HCLGE_OPC_DFX_PPP_REG,
368         HCLGE_OPC_DFX_RCB_REG,
369         HCLGE_OPC_DFX_TQP_REG,
370         HCLGE_OPC_DFX_SSU_REG_2
371 };
372
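/* { field id, width in bits } descriptors for the meta-data and tuple
 * fields; presumably used when laying out flow director (FD) match keys.
 */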
373 static const struct key_info meta_data_key_info[] = {
374         { PACKET_TYPE_ID, 6},
375         { IP_FRAGEMENT, 1},
376         { ROCE_TYPE, 1},
377         { NEXT_KEY, 5},
378         { VLAN_NUMBER, 2},
379         { SRC_VPORT, 12},
380         { DST_VPORT, 12},
381         { TUNNEL_PACKET, 1},
382 };
383
384 static const struct key_info tuple_key_info[] = {
385         { OUTER_DST_MAC, 48},
386         { OUTER_SRC_MAC, 48},
387         { OUTER_VLAN_TAG_FST, 16},
388         { OUTER_VLAN_TAG_SEC, 16},
389         { OUTER_ETH_TYPE, 16},
390         { OUTER_L2_RSV, 16},
391         { OUTER_IP_TOS, 8},
392         { OUTER_IP_PROTO, 8},
393         { OUTER_SRC_IP, 32},
394         { OUTER_DST_IP, 32},
395         { OUTER_L3_RSV, 16},
396         { OUTER_SRC_PORT, 16},
397         { OUTER_DST_PORT, 16},
398         { OUTER_L4_RSV, 32},
399         { OUTER_TUN_VNI, 24},
400         { OUTER_TUN_FLOW_ID, 8},
401         { INNER_DST_MAC, 48},
402         { INNER_SRC_MAC, 48},
403         { INNER_VLAN_TAG_FST, 16},
404         { INNER_VLAN_TAG_SEC, 16},
405         { INNER_ETH_TYPE, 16},
406         { INNER_L2_RSV, 16},
407         { INNER_IP_TOS, 8},
408         { INNER_IP_PROTO, 8},
409         { INNER_SRC_IP, 32},
410         { INNER_DST_IP, 32},
411         { INNER_L3_RSV, 16},
412         { INNER_SRC_PORT, 16},
413         { INNER_DST_PORT, 16},
414         { INNER_L4_RSV, 32},
415 };
416
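/* Legacy MAC statistics path: reads a fixed HCLGE_MAC_CMD_NUM descriptors
 * via HCLGE_OPC_STATS_MAC. Used as a fallback by hclge_mac_update_stats()
 * when the firmware cannot report the MAC register count.
 */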
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420
421         u64 *data = (u64 *)(&hdev->mac_stats);
422         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423         __le64 *desc_data;
424         int i, k, n;
425         int ret;
426
427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429         if (ret) {
430                 dev_err(&hdev->pdev->dev,
431                         "Get MAC pkt stats fail, status = %d.\n", ret);
432
433                 return ret;
434         }
435
436         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437                 /* for special opcode 0032, only the first desc has the head */
438                 if (unlikely(i == 0)) {
439                         desc_data = (__le64 *)(&desc[i].data[0]);
440                         n = HCLGE_RD_FIRST_STATS_NUM;
441                 } else {
442                         desc_data = (__le64 *)(&desc[i]);
443                         n = HCLGE_RD_OTHER_STATS_NUM;
444                 }
445
446                 for (k = 0; k < n; k++) {
447                         *data += le64_to_cpu(*desc_data);
448                         data++;
449                         desc_data++;
450                 }
451         }
452
453         return 0;
454 }
455
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458         u64 *data = (u64 *)(&hdev->mac_stats);
459         struct hclge_desc *desc;
460         __le64 *desc_data;
461         u16 i, k, n;
462         int ret;
463
464         /* This may be called inside atomic sections,
465          * so GFP_ATOMIC is more suitable here
466          */
467         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468         if (!desc)
469                 return -ENOMEM;
470
471         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473         if (ret) {
474                 kfree(desc);
475                 return ret;
476         }
477
478         for (i = 0; i < desc_num; i++) {
479                 /* for special opcode 0034, only the first desc has the head */
480                 if (i == 0) {
481                         desc_data = (__le64 *)(&desc[i].data[0]);
482                         n = HCLGE_RD_FIRST_STATS_NUM;
483                 } else {
484                         desc_data = (__le64 *)(&desc[i]);
485                         n = HCLGE_RD_OTHER_STATS_NUM;
486                 }
487
488                 for (k = 0; k < n; k++) {
489                         *data += le64_to_cpu(*desc_data);
490                         data++;
491                         desc_data++;
492                 }
493         }
494
495         kfree(desc);
496
497         return 0;
498 }
499
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502         struct hclge_desc desc;
503         __le32 *desc_data;
504         u32 reg_num;
505         int ret;
506
507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509         if (ret)
510                 return ret;
511
512         desc_data = (__le32 *)(&desc.data[0]);
513         reg_num = le32_to_cpu(*desc_data);
514
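        /* One descriptor plus ceil((reg_num - 3) / 4) more are needed, i.e.
         * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4); presumably the first
         * descriptor holds fewer values because it also carries the head.
         */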
515         *desc_num = 1 + ((reg_num - 3) >> 2) +
516                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518         return 0;
519 }
520
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523         u32 desc_num;
524         int ret;
525
526         ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528         /* The firmware supports the new statistics acquisition method */
529         if (!ret)
530                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531         else if (ret == -EOPNOTSUPP)
532                 ret = hclge_mac_update_stats_defective(hdev);
533         else
534                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536         return ret;
537 }
538
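/* Accumulate per-queue packet counters: one HCLGE_OPC_QUERY_RX_STATUS /
 * HCLGE_OPC_QUERY_TX_STATUS command per TQP, added into tqp->tqp_stats.
 */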
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542         struct hclge_vport *vport = hclge_get_vport(handle);
543         struct hclge_dev *hdev = vport->back;
544         struct hnae3_queue *queue;
545         struct hclge_desc desc[1];
546         struct hclge_tqp *tqp;
547         int ret, i;
548
549         for (i = 0; i < kinfo->num_tqps; i++) {
550                 queue = handle->kinfo.tqp[i];
551                 tqp = container_of(queue, struct hclge_tqp, q);
552                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
553                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554                                            true);
555
556                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558                 if (ret) {
559                         dev_err(&hdev->pdev->dev,
560                                 "Query tqp stat fail, status = %d,queue = %d\n",
561                                 ret, i);
562                         return ret;
563                 }
564                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565                         le32_to_cpu(desc[0].data[1]);
566         }
567
568         for (i = 0; i < kinfo->num_tqps; i++) {
569                 queue = handle->kinfo.tqp[i];
570                 tqp = container_of(queue, struct hclge_tqp, q);
571                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
572                 hclge_cmd_setup_basic_desc(&desc[0],
573                                            HCLGE_OPC_QUERY_TX_STATUS,
574                                            true);
575
576                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578                 if (ret) {
579                         dev_err(&hdev->pdev->dev,
580                                 "Query tqp stat fail, status = %d,queue = %d\n",
581                                 ret, i);
582                         return ret;
583                 }
584                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585                         le32_to_cpu(desc[0].data[1]);
586         }
587
588         return 0;
589 }
590
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594         struct hclge_tqp *tqp;
595         u64 *buff = data;
596         int i;
597
598         for (i = 0; i < kinfo->num_tqps; i++) {
599                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601         }
602
603         for (i = 0; i < kinfo->num_tqps; i++) {
604                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606         }
607
608         return buff;
609 }
610
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615         /* each tqp has one TX queue and one RX queue */
616         return kinfo->num_tqps * 2;
617 }
618
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         u8 *buff = data;
623         int i = 0;
624
625         for (i = 0; i < kinfo->num_tqps; i++) {
626                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627                         struct hclge_tqp, q);
628                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629                          tqp->index);
630                 buff = buff + ETH_GSTRING_LEN;
631         }
632
633         for (i = 0; i < kinfo->num_tqps; i++) {
634                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635                         struct hclge_tqp, q);
636                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637                          tqp->index);
638                 buff = buff + ETH_GSTRING_LEN;
639         }
640
641         return buff;
642 }
643
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645                                  const struct hclge_comm_stats_str strs[],
646                                  int size, u64 *data)
647 {
648         u64 *buf = data;
649         u32 i;
650
651         for (i = 0; i < size; i++)
652                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654         return buf + size;
655 }
656
657 static u8 *hclge_comm_get_strings(u32 stringset,
658                                   const struct hclge_comm_stats_str strs[],
659                                   int size, u8 *data)
660 {
661         char *buff = (char *)data;
662         u32 i;
663
664         if (stringset != ETH_SS_STATS)
665                 return buff;
666
667         for (i = 0; i < size; i++) {
668                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669                 buff = buff + ETH_GSTRING_LEN;
670         }
671
672         return (u8 *)buff;
673 }
674
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677         struct hnae3_handle *handle;
678         int status;
679
680         handle = &hdev->vport[0].nic;
681         if (handle->client) {
682                 status = hclge_tqps_update_stats(handle);
683                 if (status) {
684                         dev_err(&hdev->pdev->dev,
685                                 "Update TQPS stats fail, status = %d.\n",
686                                 status);
687                 }
688         }
689
690         status = hclge_mac_update_stats(hdev);
691         if (status)
692                 dev_err(&hdev->pdev->dev,
693                         "Update MAC stats fail, status = %d.\n", status);
694 }
695
696 static void hclge_update_stats(struct hnae3_handle *handle,
697                                struct net_device_stats *net_stats)
698 {
699         struct hclge_vport *vport = hclge_get_vport(handle);
700         struct hclge_dev *hdev = vport->back;
701         int status;
702
703         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704                 return;
705
706         status = hclge_mac_update_stats(hdev);
707         if (status)
708                 dev_err(&hdev->pdev->dev,
709                         "Update MAC stats fail, status = %d.\n",
710                         status);
711
712         status = hclge_tqps_update_stats(handle);
713         if (status)
714                 dev_err(&hdev->pdev->dev,
715                         "Update TQPS stats fail, status = %d.\n",
716                         status);
717
718         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724                 HNAE3_SUPPORT_PHY_LOOPBACK |\
725                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int count = 0;
731
732         /* Loopback test support rules:
733          * mac: supported only in GE mode
734          * serdes: supported in all MAC modes (GE/XGE/LGE/CGE)
735          * phy: supported only when a PHY device is present on the board
736          */
737         if (stringset == ETH_SS_TEST) {
738                 /* clear loopback bit flags at first */
739                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740                 if (hdev->pdev->revision >= 0x21 ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744                         count += 1;
745                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746                 }
747
748                 count += 2;
749                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752                 if (hdev->hw.mac.phydev) {
753                         count += 1;
754                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755                 }
756
757         } else if (stringset == ETH_SS_STATS) {
758                 count = ARRAY_SIZE(g_mac_stats_string) +
759                         hclge_tqps_get_sset_count(handle, stringset);
760         }
761
762         return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766                               u8 *data)
767 {
768         u8 *p = data;
769         int size;
770
771         if (stringset == ETH_SS_STATS) {
772                 size = ARRAY_SIZE(g_mac_stats_string);
773                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774                                            size, p);
775                 p = hclge_tqps_get_strings(handle, p);
776         } else if (stringset == ETH_SS_TEST) {
777                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779                                ETH_GSTRING_LEN);
780                         p += ETH_GSTRING_LEN;
781                 }
782                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784                                ETH_GSTRING_LEN);
785                         p += ETH_GSTRING_LEN;
786                 }
787                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788                         memcpy(p,
789                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790                                ETH_GSTRING_LEN);
791                         p += ETH_GSTRING_LEN;
792                 }
793                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795                                ETH_GSTRING_LEN);
796                         p += ETH_GSTRING_LEN;
797                 }
798         }
799 }
800
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803         struct hclge_vport *vport = hclge_get_vport(handle);
804         struct hclge_dev *hdev = vport->back;
805         u64 *p;
806
807         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808                                  ARRAY_SIZE(g_mac_stats_string), data);
809         p = hclge_tqps_get_stats(handle, p);
810 }
811
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813                                struct hns3_mac_stats *mac_stats)
814 {
815         struct hclge_vport *vport = hclge_get_vport(handle);
816         struct hclge_dev *hdev = vport->back;
817
818         hclge_update_stats(handle, NULL);
819
820         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825                                    struct hclge_func_status_cmd *status)
826 {
827         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828                 return -EINVAL;
829
830         /* Record whether this PF is the main PF */
831         if (status->pf_state & HCLGE_PF_STATE_MAIN)
832                 hdev->flag |= HCLGE_FLAG_MAIN;
833         else
834                 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836         return 0;
837 }
838
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT     5
842
843         struct hclge_func_status_cmd *req;
844         struct hclge_desc desc;
845         int timeout = 0;
846         int ret;
847
848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849         req = (struct hclge_func_status_cmd *)desc.data;
850
851         do {
852                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853                 if (ret) {
854                         dev_err(&hdev->pdev->dev,
855                                 "query function status failed %d.\n", ret);
856                         return ret;
857                 }
858
859                 /* Check pf reset is done */
860                 if (req->pf_state)
861                         break;
862                 usleep_range(1000, 2000);
863         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865         ret = hclge_parse_func_status(hdev, req);
866
867         return ret;
868 }
869
870 static int hclge_query_pf_resource(struct hclge_dev *hdev)
871 {
872         struct hclge_pf_res_cmd *req;
873         struct hclge_desc desc;
874         int ret;
875
876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
877         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
878         if (ret) {
879                 dev_err(&hdev->pdev->dev,
880                         "query pf resource failed %d.\n", ret);
881                 return ret;
882         }
883
884         req = (struct hclge_pf_res_cmd *)desc.data;
885         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
886         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
887
888         if (req->tx_buf_size)
889                 hdev->tx_buf_size =
890                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
891         else
892                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
893
894         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
895
896         if (req->dv_buf_size)
897                 hdev->dv_buf_size =
898                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
899         else
900                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
901
902         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
903
904         if (hnae3_dev_roce_supported(hdev)) {
905                 hdev->roce_base_msix_offset =
906                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
907                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
908                 hdev->num_roce_msi =
909                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
910                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
911
912                 /* the NIC's MSI-X vector count always equals the RoCE's */
913                 hdev->num_nic_msi = hdev->num_roce_msi;
914
915                 /* The PF has both NIC and RoCE vectors;
916                  * NIC vectors are laid out before RoCE vectors.
917                  */
918                 hdev->num_msi = hdev->num_roce_msi +
919                                 hdev->roce_base_msix_offset;
920         } else {
921                 hdev->num_msi =
922                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
923                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
924
925                 hdev->num_nic_msi = hdev->num_msi;
926         }
927
928         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
929                 dev_err(&hdev->pdev->dev,
930                         "Just %u msi resources, not enough for pf(min:2).\n",
931                         hdev->num_nic_msi);
932                 return -EINVAL;
933         }
934
935         return 0;
936 }
937
938 static int hclge_parse_speed(int speed_cmd, int *speed)
939 {
940         switch (speed_cmd) {
941         case 6:
942                 *speed = HCLGE_MAC_SPEED_10M;
943                 break;
944         case 7:
945                 *speed = HCLGE_MAC_SPEED_100M;
946                 break;
947         case 0:
948                 *speed = HCLGE_MAC_SPEED_1G;
949                 break;
950         case 1:
951                 *speed = HCLGE_MAC_SPEED_10G;
952                 break;
953         case 2:
954                 *speed = HCLGE_MAC_SPEED_25G;
955                 break;
956         case 3:
957                 *speed = HCLGE_MAC_SPEED_40G;
958                 break;
959         case 4:
960                 *speed = HCLGE_MAC_SPEED_50G;
961                 break;
962         case 5:
963                 *speed = HCLGE_MAC_SPEED_100G;
964                 break;
965         default:
966                 return -EINVAL;
967         }
968
969         return 0;
970 }
971
972 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
973 {
974         struct hclge_vport *vport = hclge_get_vport(handle);
975         struct hclge_dev *hdev = vport->back;
976         u32 speed_ability = hdev->hw.mac.speed_ability;
977         u32 speed_bit = 0;
978
979         switch (speed) {
980         case HCLGE_MAC_SPEED_10M:
981                 speed_bit = HCLGE_SUPPORT_10M_BIT;
982                 break;
983         case HCLGE_MAC_SPEED_100M:
984                 speed_bit = HCLGE_SUPPORT_100M_BIT;
985                 break;
986         case HCLGE_MAC_SPEED_1G:
987                 speed_bit = HCLGE_SUPPORT_1G_BIT;
988                 break;
989         case HCLGE_MAC_SPEED_10G:
990                 speed_bit = HCLGE_SUPPORT_10G_BIT;
991                 break;
992         case HCLGE_MAC_SPEED_25G:
993                 speed_bit = HCLGE_SUPPORT_25G_BIT;
994                 break;
995         case HCLGE_MAC_SPEED_40G:
996                 speed_bit = HCLGE_SUPPORT_40G_BIT;
997                 break;
998         case HCLGE_MAC_SPEED_50G:
999                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1000                 break;
1001         case HCLGE_MAC_SPEED_100G:
1002                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1003                 break;
1004         default:
1005                 return -EINVAL;
1006         }
1007
1008         if (speed_bit & speed_ability)
1009                 return 0;
1010
1011         return -EINVAL;
1012 }
1013
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1015 {
1016         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018                                  mac->supported);
1019         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021                                  mac->supported);
1022         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024                                  mac->supported);
1025         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027                                  mac->supported);
1028         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030                                  mac->supported);
1031 }
1032
1033 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1034 {
1035         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1036                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1037                                  mac->supported);
1038         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1043                                  mac->supported);
1044         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1046                                  mac->supported);
1047         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1048                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1049                                  mac->supported);
1050 }
1051
1052 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1053 {
1054         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1059                                  mac->supported);
1060         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1061                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1062                                  mac->supported);
1063         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1064                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1065                                  mac->supported);
1066         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1067                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1068                                  mac->supported);
1069 }
1070
1071 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1072 {
1073         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1080                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1081                                  mac->supported);
1082         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1083                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1084                                  mac->supported);
1085         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1086                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1087                                  mac->supported);
1088         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1089                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1090                                  mac->supported);
1091 }
1092
1093 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1094 {
1095         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1096         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1097
1098         switch (mac->speed) {
1099         case HCLGE_MAC_SPEED_10G:
1100         case HCLGE_MAC_SPEED_40G:
1101                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1102                                  mac->supported);
1103                 mac->fec_ability =
1104                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1105                 break;
1106         case HCLGE_MAC_SPEED_25G:
1107         case HCLGE_MAC_SPEED_50G:
1108                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1109                                  mac->supported);
1110                 mac->fec_ability =
1111                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1112                         BIT(HNAE3_FEC_AUTO);
1113                 break;
1114         case HCLGE_MAC_SPEED_100G:
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1116                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1117                 break;
1118         default:
1119                 mac->fec_ability = 0;
1120                 break;
1121         }
1122 }
1123
1124 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1125                                         u8 speed_ability)
1126 {
1127         struct hclge_mac *mac = &hdev->hw.mac;
1128
1129         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1130                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1131                                  mac->supported);
1132
1133         hclge_convert_setting_sr(mac, speed_ability);
1134         hclge_convert_setting_lr(mac, speed_ability);
1135         hclge_convert_setting_cr(mac, speed_ability);
1136         if (hdev->pdev->revision >= 0x21)
1137                 hclge_convert_setting_fec(mac);
1138
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1140         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1141         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1142 }
1143
1144 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1145                                             u8 speed_ability)
1146 {
1147         struct hclge_mac *mac = &hdev->hw.mac;
1148
1149         hclge_convert_setting_kr(mac, speed_ability);
1150         if (hdev->pdev->revision >= 0x21)
1151                 hclge_convert_setting_fec(mac);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1153         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1154         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1155 }
1156
1157 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1158                                          u8 speed_ability)
1159 {
1160         unsigned long *supported = hdev->hw.mac.supported;
1161
1162         /* default to supporting all speeds for a GE port */
1163         if (!speed_ability)
1164                 speed_ability = HCLGE_SUPPORT_GE;
1165
1166         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1167                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1168                                  supported);
1169
1170         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1172                                  supported);
1173                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1174                                  supported);
1175         }
1176
1177         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1178                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1179                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1180         }
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1184         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1185         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1186 }
1187
1188 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1189 {
1190         u8 media_type = hdev->hw.mac.media_type;
1191
1192         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1193                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1195                 hclge_parse_copper_link_mode(hdev, speed_ability);
1196         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1197                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1198 }
1199
1200 static u32 hclge_get_max_speed(u8 speed_ability)
1201 {
1202         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1203                 return HCLGE_MAC_SPEED_100G;
1204
1205         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1206                 return HCLGE_MAC_SPEED_50G;
1207
1208         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1209                 return HCLGE_MAC_SPEED_40G;
1210
1211         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1212                 return HCLGE_MAC_SPEED_25G;
1213
1214         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1215                 return HCLGE_MAC_SPEED_10G;
1216
1217         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1218                 return HCLGE_MAC_SPEED_1G;
1219
1220         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1221                 return HCLGE_MAC_SPEED_100M;
1222
1223         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1224                 return HCLGE_MAC_SPEED_10M;
1225
1226         return HCLGE_MAC_SPEED_1G;
1227 }
1228
1229 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1230 {
1231         struct hclge_cfg_param_cmd *req;
1232         u64 mac_addr_tmp_high;
1233         u64 mac_addr_tmp;
1234         unsigned int i;
1235
1236         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1237
1238         /* get the configuration */
1239         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1240                                               HCLGE_CFG_VMDQ_M,
1241                                               HCLGE_CFG_VMDQ_S);
1242         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1244         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1245                                             HCLGE_CFG_TQP_DESC_N_M,
1246                                             HCLGE_CFG_TQP_DESC_N_S);
1247
1248         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1249                                         HCLGE_CFG_PHY_ADDR_M,
1250                                         HCLGE_CFG_PHY_ADDR_S);
1251         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252                                           HCLGE_CFG_MEDIA_TP_M,
1253                                           HCLGE_CFG_MEDIA_TP_S);
1254         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255                                           HCLGE_CFG_RX_BUF_LEN_M,
1256                                           HCLGE_CFG_RX_BUF_LEN_S);
1257         /* get mac_address */
1258         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1259         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1260                                             HCLGE_CFG_MAC_ADDR_H_M,
1261                                             HCLGE_CFG_MAC_ADDR_H_S);
1262
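        /* Reassemble the MAC address: the low 32 bits come from param[2] and
         * the remaining high bits from param[3]; the (<< 31) << 1 below is a
         * 32-bit left shift done in two steps.
         */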
1263         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1264
1265         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1266                                              HCLGE_CFG_DEFAULT_SPEED_M,
1267                                              HCLGE_CFG_DEFAULT_SPEED_S);
1268         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269                                             HCLGE_CFG_RSS_SIZE_M,
1270                                             HCLGE_CFG_RSS_SIZE_S);
1271
1272         for (i = 0; i < ETH_ALEN; i++)
1273                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1274
1275         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1276         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1277
1278         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279                                              HCLGE_CFG_SPEED_ABILITY_M,
1280                                              HCLGE_CFG_SPEED_ABILITY_S);
1281         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1283                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1284         if (!cfg->umv_space)
1285                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1286 }
1287
1288 /* hclge_get_cfg: query the static parameters from flash
1289  * @hdev: pointer to struct hclge_dev
1290  * @hcfg: the config structure to be filled
1291  */
1292 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1293 {
1294         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1295         struct hclge_cfg_param_cmd *req;
1296         unsigned int i;
1297         int ret;
1298
1299         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1300                 u32 offset = 0;
1301
1302                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1303                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1304                                            true);
1305                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1306                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1307                 /* Length must be in units of 4 bytes when sent to hardware */
1308                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1309                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1310                 req->offset = cpu_to_le32(offset);
1311         }
1312
1313         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1314         if (ret) {
1315                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1316                 return ret;
1317         }
1318
1319         hclge_parse_cfg(hcfg, desc);
1320
1321         return 0;
1322 }
1323
1324 static int hclge_get_cap(struct hclge_dev *hdev)
1325 {
1326         int ret;
1327
1328         ret = hclge_query_function_status(hdev);
1329         if (ret) {
1330                 dev_err(&hdev->pdev->dev,
1331                         "query function status error %d.\n", ret);
1332                 return ret;
1333         }
1334
1335         /* get pf resource */
1336         ret = hclge_query_pf_resource(hdev);
1337         if (ret)
1338                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1339
1340         return ret;
1341 }
1342
1343 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1344 {
1345 #define HCLGE_MIN_TX_DESC       64
1346 #define HCLGE_MIN_RX_DESC       64
1347
1348         if (!is_kdump_kernel())
1349                 return;
1350
1351         dev_info(&hdev->pdev->dev,
1352                  "Running kdump kernel. Using minimal resources\n");
1353
1354         /* the minimal number of queue pairs equals the number of vports */
1355         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1356         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1357         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1358 }
1359
1360 static int hclge_configure(struct hclge_dev *hdev)
1361 {
1362         struct hclge_cfg cfg;
1363         unsigned int i;
1364         int ret;
1365
1366         ret = hclge_get_cfg(hdev, &cfg);
1367         if (ret) {
1368                 dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
1369                 return ret;
1370         }
1371
1372         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1373         hdev->base_tqp_pid = 0;
1374         hdev->rss_size_max = cfg.rss_size_max;
1375         hdev->rx_buf_len = cfg.rx_buf_len;
1376         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1377         hdev->hw.mac.media_type = cfg.media_type;
1378         hdev->hw.mac.phy_addr = cfg.phy_addr;
1379         hdev->num_tx_desc = cfg.tqp_desc_num;
1380         hdev->num_rx_desc = cfg.tqp_desc_num;
1381         hdev->tm_info.num_pg = 1;
1382         hdev->tc_max = cfg.tc_num;
1383         hdev->tm_info.hw_pfc_map = 0;
1384         hdev->wanted_umv_size = cfg.umv_space;
1385
1386         if (hnae3_dev_fd_supported(hdev)) {
1387                 hdev->fd_en = true;
1388                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1389         }
1390
1391         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1392         if (ret) {
1393                 dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
1394                 return ret;
1395         }
1396
1397         hclge_parse_link_mode(hdev, cfg.speed_ability);
1398
1399         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1400
1401         if (hdev->tc_max > HNAE3_MAX_TC || hdev->tc_max < 1) {
1402                 dev_warn(&hdev->pdev->dev, "invalid TC num %u, set to 1.\n",
1403                          hdev->tc_max);
1405                 hdev->tc_max = 1;
1406         }
1407
1408         /* Dev does not support DCB */
1409         if (!hnae3_dev_dcb_supported(hdev)) {
1410                 hdev->tc_max = 1;
1411                 hdev->pfc_max = 0;
1412         } else {
1413                 hdev->pfc_max = hdev->tc_max;
1414         }
1415
1416         hdev->tm_info.num_tc = 1;
1417
1418         /* Currently discontiguous TCs are not supported */
1419         for (i = 0; i < hdev->tm_info.num_tc; i++)
1420                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1421
1422         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1423
1424         hclge_init_kdump_kernel_config(hdev);
1425
1426         /* Set the initial affinity based on the PCI function number */
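        /* i is first the number of CPUs in the device's NUMA node; the PCI
         * function number modulo that count then selects one of those local
         * CPUs via cpumask_local_spread(), spreading PFs across the node.
         */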
1427         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1428         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1429         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1430                         &hdev->affinity_mask);
1431
1432         return ret;
1433 }
1434
1435 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1436                             unsigned int tso_mss_max)
1437 {
1438         struct hclge_cfg_tso_status_cmd *req;
1439         struct hclge_desc desc;
1440         u16 tso_mss;
1441
1442         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1443
1444         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1445
1446         tso_mss = 0;
1447         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1448                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1449         req->tso_mss_min = cpu_to_le16(tso_mss);
1450
1451         tso_mss = 0;
1452         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1453                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1454         req->tso_mss_max = cpu_to_le16(tso_mss);
1455
1456         return hclge_cmd_send(&hdev->hw, &desc, 1);
1457 }
1458
1459 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1460 {
1461         struct hclge_cfg_gro_status_cmd *req;
1462         struct hclge_desc desc;
1463         int ret;
1464
1465         if (!hnae3_dev_gro_supported(hdev))
1466                 return 0;
1467
1468         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1469         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1470
1471         req->gro_en = cpu_to_le16(en ? 1 : 0);
1472
1473         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1474         if (ret)
1475                 dev_err(&hdev->pdev->dev,
1476                         "GRO hardware config cmd failed, ret = %d\n", ret);
1477
1478         return ret;
1479 }
1480
1481 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1482 {
1483         struct hclge_tqp *tqp;
1484         int i;
1485
1486         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1487                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1488         if (!hdev->htqp)
1489                 return -ENOMEM;
1490
1491         tqp = hdev->htqp;
1492
1493         for (i = 0; i < hdev->num_tqps; i++) {
1494                 tqp->dev = &hdev->pdev->dev;
1495                 tqp->index = i;
1496
1497                 tqp->q.ae_algo = &ae_algo;
1498                 tqp->q.buf_size = hdev->rx_buf_len;
1499                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1500                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1501                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1502                         i * HCLGE_TQP_REG_SIZE;
1503
1504                 tqp++;
1505         }
1506
1507         return 0;
1508 }
1509
1510 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1511                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1512 {
1513         struct hclge_tqp_map_cmd *req;
1514         struct hclge_desc desc;
1515         int ret;
1516
1517         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1518
1519         req = (struct hclge_tqp_map_cmd *)desc.data;
1520         req->tqp_id = cpu_to_le16(tqp_pid);
1521         req->tqp_vf = func_id;
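        /* MAP_EN enables the mapping; MAP_TYPE distinguishes a VF queue
         * mapping from a PF one (the bit is only set when !is_pf).
         */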
1522         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1523         if (!is_pf)
1524                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1525         req->tqp_vid = cpu_to_le16(tqp_vid);
1526
1527         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1528         if (ret)
1529                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1530
1531         return ret;
1532 }
1533
1534 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1535 {
1536         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1537         struct hclge_dev *hdev = vport->back;
1538         int i, alloced;
1539
1540         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1541              alloced < num_tqps; i++) {
1542                 if (!hdev->htqp[i].alloced) {
1543                         hdev->htqp[i].q.handle = &vport->nic;
1544                         hdev->htqp[i].q.tqp_index = alloced;
1545                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1546                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1547                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1548                         hdev->htqp[i].alloced = true;
1549                         alloced++;
1550                 }
1551         }
1552         vport->alloc_tqps = alloced;
1553         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1554                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1555
1556         /* ensure a one-to-one mapping between irq and queue by default */
1557         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1558                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1559
1560         return 0;
1561 }
1562
1563 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1564                             u16 num_tx_desc, u16 num_rx_desc)
1566 {
1567         struct hnae3_handle *nic = &vport->nic;
1568         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1569         struct hclge_dev *hdev = vport->back;
1570         int ret;
1571
1572         kinfo->num_tx_desc = num_tx_desc;
1573         kinfo->num_rx_desc = num_rx_desc;
1574
1575         kinfo->rx_buf_len = hdev->rx_buf_len;
1576
1577         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1578                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1579         if (!kinfo->tqp)
1580                 return -ENOMEM;
1581
1582         ret = hclge_assign_tqp(vport, num_tqps);
1583         if (ret)
1584                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1585
1586         return ret;
1587 }
1588
1589 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1590                                   struct hclge_vport *vport)
1591 {
1592         struct hnae3_handle *nic = &vport->nic;
1593         struct hnae3_knic_private_info *kinfo;
1594         u16 i;
1595
1596         kinfo = &nic->kinfo;
1597         for (i = 0; i < vport->alloc_tqps; i++) {
1598                 struct hclge_tqp *q =
1599                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1600                 bool is_pf;
1601                 int ret;
1602
1603                 is_pf = !(vport->vport_id);
1604                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1605                                              i, is_pf);
1606                 if (ret)
1607                         return ret;
1608         }
1609
1610         return 0;
1611 }
1612
1613 static int hclge_map_tqp(struct hclge_dev *hdev)
1614 {
1615         struct hclge_vport *vport = hdev->vport;
1616         u16 i, num_vport;
1617
1618         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1619         for (i = 0; i < num_vport; i++) {
1620                 int ret;
1621
1622                 ret = hclge_map_tqp_to_vport(hdev, vport);
1623                 if (ret)
1624                         return ret;
1625
1626                 vport++;
1627         }
1628
1629         return 0;
1630 }
1631
1632 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1633 {
1634         struct hnae3_handle *nic = &vport->nic;
1635         struct hclge_dev *hdev = vport->back;
1636         int ret;
1637
1638         nic->pdev = hdev->pdev;
1639         nic->ae_algo = &ae_algo;
1640         nic->numa_node_mask = hdev->numa_node_mask;
1641
1642         ret = hclge_knic_setup(vport, num_tqps,
1643                                hdev->num_tx_desc, hdev->num_rx_desc);
1644         if (ret)
1645                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1646
1647         return ret;
1648 }
1649
1650 static int hclge_alloc_vport(struct hclge_dev *hdev)
1651 {
1652         struct pci_dev *pdev = hdev->pdev;
1653         struct hclge_vport *vport;
1654         u32 tqp_main_vport;
1655         u32 tqp_per_vport;
1656         int num_vport, i;
1657         int ret;
1658
1659         /* We need to alloc a vport for the main NIC of the PF */
1660         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1661
1662         if (hdev->num_tqps < num_vport) {
1663                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1664                         hdev->num_tqps, num_vport);
1665                 return -EINVAL;
1666         }
1667
1668         /* Alloc the same number of TQPs for every vport */
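        /* Any remainder from the integer division goes to the PF's main
         * vport (vport 0), which therefore may own a few extra TQPs.
         */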
1669         tqp_per_vport = hdev->num_tqps / num_vport;
1670         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1671
1672         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1673                              GFP_KERNEL);
1674         if (!vport)
1675                 return -ENOMEM;
1676
1677         hdev->vport = vport;
1678         hdev->num_alloc_vport = num_vport;
1679
1680         if (IS_ENABLED(CONFIG_PCI_IOV))
1681                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1682
1683         for (i = 0; i < num_vport; i++) {
1684                 vport->back = hdev;
1685                 vport->vport_id = i;
1686                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1687                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1688                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1689                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1690                 INIT_LIST_HEAD(&vport->vlan_list);
1691                 INIT_LIST_HEAD(&vport->uc_mac_list);
1692                 INIT_LIST_HEAD(&vport->mc_mac_list);
1693
1694                 if (i == 0)
1695                         ret = hclge_vport_setup(vport, tqp_main_vport);
1696                 else
1697                         ret = hclge_vport_setup(vport, tqp_per_vport);
1698                 if (ret) {
1699                         dev_err(&pdev->dev,
1700                                 "vport setup failed for vport %d, %d\n",
1701                                 i, ret);
1702                         return ret;
1703                 }
1704
1705                 vport++;
1706         }
1707
1708         return 0;
1709 }
1710
1711 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1712                                     struct hclge_pkt_buf_alloc *buf_alloc)
1713 {
1714 /* TX buffer size is in units of 128 bytes */
1715 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1716 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1717         struct hclge_tx_buff_alloc_cmd *req;
1718         struct hclge_desc desc;
1719         int ret;
1720         u8 i;
1721
1722         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1723
1724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1725         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1727
1728                 req->tx_pkt_buff[i] =
1729                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1730                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1731         }
1732
1733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1734         if (ret)
1735                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1736                         ret);
1737
1738         return ret;
1739 }
1740
1741 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1742                                  struct hclge_pkt_buf_alloc *buf_alloc)
1743 {
1744         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1745
1746         if (ret)
1747                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1748
1749         return ret;
1750 }
1751
1752 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1753 {
1754         unsigned int i;
1755         u32 cnt = 0;
1756
1757         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1758                 if (hdev->hw_tc_map & BIT(i))
1759                         cnt++;
1760         return cnt;
1761 }
1762
1763 /* Get the number of PFC-enabled TCs that have a private buffer */
1764 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1765                                   struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767         struct hclge_priv_buf *priv;
1768         unsigned int i;
1769         int cnt = 0;
1770
1771         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772                 priv = &buf_alloc->priv_buf[i];
1773                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1774                     priv->enable)
1775                         cnt++;
1776         }
1777
1778         return cnt;
1779 }
1780
1781 /* Get the number of PFC-disabled TCs that have a private buffer */
1782 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1783                                      struct hclge_pkt_buf_alloc *buf_alloc)
1784 {
1785         struct hclge_priv_buf *priv;
1786         unsigned int i;
1787         int cnt = 0;
1788
1789         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1790                 priv = &buf_alloc->priv_buf[i];
1791                 if (hdev->hw_tc_map & BIT(i) &&
1792                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1793                     priv->enable)
1794                         cnt++;
1795         }
1796
1797         return cnt;
1798 }
1799
1800 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1801 {
1802         struct hclge_priv_buf *priv;
1803         u32 rx_priv = 0;
1804         int i;
1805
1806         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1807                 priv = &buf_alloc->priv_buf[i];
1808                 if (priv->enable)
1809                         rx_priv += priv->buf_size;
1810         }
1811         return rx_priv;
1812 }
1813
1814 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1815 {
1816         u32 i, total_tx_size = 0;
1817
1818         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1819                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1820
1821         return total_tx_size;
1822 }
1823
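/* Check whether the rx buffer left over after the per-TC private buffers is
 * large enough for the shared buffer; if so, derive the shared buffer size
 * and its per-TC high/low thresholds. Returns true when the layout fits.
 */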
1824 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1825                                 struct hclge_pkt_buf_alloc *buf_alloc,
1826                                 u32 rx_all)
1827 {
1828         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1829         u32 tc_num = hclge_get_tc_num(hdev);
1830         u32 shared_buf, aligned_mps;
1831         u32 rx_priv;
1832         int i;
1833
1834         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1835
1836         if (hnae3_dev_dcb_supported(hdev))
1837                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1838                                         hdev->dv_buf_size;
1839         else
1840                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1841                                         + hdev->dv_buf_size;
1842
1843         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1844         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1845                              HCLGE_BUF_SIZE_UNIT);
1846
1847         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1848         if (rx_all < rx_priv + shared_std)
1849                 return false;
1850
1851         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1852         buf_alloc->s_buf.buf_size = shared_buf;
1853         if (hnae3_dev_dcb_supported(hdev)) {
1854                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1855                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1856                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1857                                   HCLGE_BUF_SIZE_UNIT);
1858         } else {
1859                 buf_alloc->s_buf.self.high = aligned_mps +
1860                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1861                 buf_alloc->s_buf.self.low = aligned_mps;
1862         }
1863
1864         if (hnae3_dev_dcb_supported(hdev)) {
1865                 hi_thrd = shared_buf - hdev->dv_buf_size;
1866
1867                 if (tc_num <= NEED_RESERVE_TC_NUM)
1868                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1869                                         / BUF_MAX_PERCENT;
1870
1871                 if (tc_num)
1872                         hi_thrd = hi_thrd / tc_num;
1873
1874                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1875                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1876                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1877         } else {
1878                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1879                 lo_thrd = aligned_mps;
1880         }
1881
1882         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1884                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1885         }
1886
1887         return true;
1888 }
1889
1890 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1891                                 struct hclge_pkt_buf_alloc *buf_alloc)
1892 {
1893         u32 i, total_size;
1894
1895         total_size = hdev->pkt_buf_size;
1896
1897         /* alloc tx buffer for all enabled TCs */
1898         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1900
1901                 if (hdev->hw_tc_map & BIT(i)) {
1902                         if (total_size < hdev->tx_buf_size)
1903                                 return -ENOMEM;
1904
1905                         priv->tx_buf_size = hdev->tx_buf_size;
1906                 } else {
1907                         priv->tx_buf_size = 0;
1908                 }
1909
1910                 total_size -= priv->tx_buf_size;
1911         }
1912
1913         return 0;
1914 }
1915
1916 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1917                                   struct hclge_pkt_buf_alloc *buf_alloc)
1918 {
1919         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1920         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1921         unsigned int i;
1922
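        /* For every enabled TC pick rx watermarks: PFC TCs get a non-zero
         * low watermark, the others only a high one; the "max" argument
         * selects the larger of the two watermark sets tried by the caller.
         */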
1923         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1924                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1925
1926                 priv->enable = 0;
1927                 priv->wl.low = 0;
1928                 priv->wl.high = 0;
1929                 priv->buf_size = 0;
1930
1931                 if (!(hdev->hw_tc_map & BIT(i)))
1932                         continue;
1933
1934                 priv->enable = 1;
1935
1936                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1937                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1938                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1939                                                 HCLGE_BUF_SIZE_UNIT);
1940                 } else {
1941                         priv->wl.low = 0;
1942                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1943                                         aligned_mps;
1944                 }
1945
1946                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1947         }
1948
1949         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1950 }
1951
1952 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1953                                           struct hclge_pkt_buf_alloc *buf_alloc)
1954 {
1955         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1956         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1957         int i;
1958
1959         /* clear TCs' private buffers starting from the last TC */
1960         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1961                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1962                 unsigned int mask = BIT((unsigned int)i);
1963
1964                 if (hdev->hw_tc_map & mask &&
1965                     !(hdev->tm_info.hw_pfc_map & mask)) {
1966                         /* Clear the private buffer of this non-PFC TC */
1967                         priv->wl.low = 0;
1968                         priv->wl.high = 0;
1969                         priv->buf_size = 0;
1970                         priv->enable = 0;
1971                         no_pfc_priv_num--;
1972                 }
1973
1974                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1975                     no_pfc_priv_num == 0)
1976                         break;
1977         }
1978
1979         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1980 }
1981
1982 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1983                                         struct hclge_pkt_buf_alloc *buf_alloc)
1984 {
1985         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1986         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1987         int i;
1988
1989         /* clear TCs' private buffers starting from the last TC */
1990         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1991                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1992                 unsigned int mask = BIT((unsigned int)i);
1993
1994                 if (hdev->hw_tc_map & mask &&
1995                     hdev->tm_info.hw_pfc_map & mask) {
1996                         /* Reduce the number of PFC TCs with a private buffer */
1997                         priv->wl.low = 0;
1998                         priv->enable = 0;
1999                         priv->wl.high = 0;
2000                         priv->buf_size = 0;
2001                         pfc_priv_num--;
2002                 }
2003
2004                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2005                     pfc_priv_num == 0)
2006                         break;
2007         }
2008
2009         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2010 }
2011
2012 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2013                                       struct hclge_pkt_buf_alloc *buf_alloc)
2014 {
2015 #define COMPENSATE_BUFFER       0x3C00
2016 #define COMPENSATE_HALF_MPS_NUM 5
2017 #define PRIV_WL_GAP             0x1800
2018
2019         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2020         u32 tc_num = hclge_get_tc_num(hdev);
2021         u32 half_mps = hdev->mps >> 1;
2022         u32 min_rx_priv;
2023         unsigned int i;
2024
2025         if (tc_num)
2026                 rx_priv = rx_priv / tc_num;
2027
2028         if (tc_num <= NEED_RESERVE_TC_NUM)
2029                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2030
2031         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2032                         COMPENSATE_HALF_MPS_NUM * half_mps;
2033         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2034         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2035
2036         if (rx_priv < min_rx_priv)
2037                 return false;
2038
2039         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2041
2042                 priv->enable = 0;
2043                 priv->wl.low = 0;
2044                 priv->wl.high = 0;
2045                 priv->buf_size = 0;
2046
2047                 if (!(hdev->hw_tc_map & BIT(i)))
2048                         continue;
2049
2050                 priv->enable = 1;
2051                 priv->buf_size = rx_priv;
2052                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2053                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2054         }
2055
2056         buf_alloc->s_buf.buf_size = 0;
2057
2058         return true;
2059 }
2060
2061 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2062  * @hdev: pointer to struct hclge_dev
2063  * @buf_alloc: pointer to buffer calculation data
2064  * @return: 0: calculation successful, negative: fail
2065  */
2066 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2067                                 struct hclge_pkt_buf_alloc *buf_alloc)
2068 {
2069         /* When DCB is not supported, rx private buffer is not allocated. */
2070         if (!hnae3_dev_dcb_supported(hdev)) {
2071                 u32 rx_all = hdev->pkt_buf_size;
2072
2073                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2074                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2075                         return -ENOMEM;
2076
2077                 return 0;
2078         }
2079
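        /* Try the allocation strategies in order: private buffers only, then
         * private plus shared buffer with large and then reduced watermarks,
         * then dropping non-PFC and finally PFC private buffers until the
         * total fits in the packet buffer.
         */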
2080         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2081                 return 0;
2082
2083         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2084                 return 0;
2085
2086         /* try to decrease the buffer size */
2087         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2088                 return 0;
2089
2090         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2091                 return 0;
2092
2093         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2094                 return 0;
2095
2096         return -ENOMEM;
2097 }
2098
2099 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2100                                    struct hclge_pkt_buf_alloc *buf_alloc)
2101 {
2102         struct hclge_rx_priv_buff_cmd *req;
2103         struct hclge_desc desc;
2104         int ret;
2105         int i;
2106
2107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2108         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2109
2110         /* Alloc a private buffer for each TC */
2111         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2112                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2113
2114                 req->buf_num[i] =
2115                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2116                 req->buf_num[i] |=
2117                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2118         }
2119
2120         req->shared_buf =
2121                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2122                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2123
2124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2125         if (ret)
2126                 dev_err(&hdev->pdev->dev,
2127                         "rx private buffer alloc cmd failed %d\n", ret);
2128
2129         return ret;
2130 }
2131
2132 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2133                                    struct hclge_pkt_buf_alloc *buf_alloc)
2134 {
2135         struct hclge_rx_priv_wl_buf *req;
2136         struct hclge_priv_buf *priv;
2137         struct hclge_desc desc[2];
2138         int i, j;
2139         int ret;
2140
2141         for (i = 0; i < 2; i++) {
2142                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2143                                            false);
2144                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2145
2146                 /* The first descriptor sets the NEXT bit to 1 */
2147                 if (i == 0)
2148                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149                 else
2150                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2151
2152                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2153                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2154
2155                         priv = &buf_alloc->priv_buf[idx];
2156                         req->tc_wl[j].high =
2157                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2158                         req->tc_wl[j].high |=
2159                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2160                         req->tc_wl[j].low =
2161                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2162                         req->tc_wl[j].low |=
2163                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2164                 }
2165         }
2166
2167         /* Send 2 descriptors at one time */
2168         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2169         if (ret)
2170                 dev_err(&hdev->pdev->dev,
2171                         "rx private waterline config cmd failed %d\n",
2172                         ret);
2173         return ret;
2174 }
2175
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177                                     struct hclge_pkt_buf_alloc *buf_alloc)
2178 {
2179         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180         struct hclge_rx_com_thrd *req;
2181         struct hclge_desc desc[2];
2182         struct hclge_tc_thrd *tc;
2183         int i, j;
2184         int ret;
2185
2186         for (i = 0; i < 2; i++) {
2187                 hclge_cmd_setup_basic_desc(&desc[i],
2188                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2190
2191                 /* The first descriptor sets the NEXT bit to 1 */
2192                 if (i == 0)
2193                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194                 else
2195                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2196
2197                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2199
2200                         req->com_thrd[j].high =
2201                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202                         req->com_thrd[j].high |=
2203                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204                         req->com_thrd[j].low =
2205                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206                         req->com_thrd[j].low |=
2207                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2208                 }
2209         }
2210
2211         /* Send 2 descriptors at one time */
2212         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2213         if (ret)
2214                 dev_err(&hdev->pdev->dev,
2215                         "common threshold config cmd failed %d\n", ret);
2216         return ret;
2217 }
2218
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220                                   struct hclge_pkt_buf_alloc *buf_alloc)
2221 {
2222         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223         struct hclge_rx_com_wl *req;
2224         struct hclge_desc desc;
2225         int ret;
2226
2227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2228
2229         req = (struct hclge_rx_com_wl *)desc.data;
2230         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231         req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232
2233         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234         req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2235
2236         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2237         if (ret)
2238                 dev_err(&hdev->pdev->dev,
2239                         "common waterline config cmd failed %d\n", ret);
2240
2241         return ret;
2242 }
2243
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2245 {
2246         struct hclge_pkt_buf_alloc *pkt_buf;
2247         int ret;
2248
2249         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2250         if (!pkt_buf)
2251                 return -ENOMEM;
2252
2253         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2254         if (ret) {
2255                 dev_err(&hdev->pdev->dev,
2256                         "could not calc tx buffer size for all TCs %d\n", ret);
2257                 goto out;
2258         }
2259
2260         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2261         if (ret) {
2262                 dev_err(&hdev->pdev->dev,
2263                         "could not alloc tx buffers %d\n", ret);
2264                 goto out;
2265         }
2266
2267         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2268         if (ret) {
2269                 dev_err(&hdev->pdev->dev,
2270                         "could not calc rx priv buffer size for all TCs %d\n",
2271                         ret);
2272                 goto out;
2273         }
2274
2275         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2276         if (ret) {
2277                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2278                         ret);
2279                 goto out;
2280         }
2281
2282         if (hnae3_dev_dcb_supported(hdev)) {
2283                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2284                 if (ret) {
2285                         dev_err(&hdev->pdev->dev,
2286                                 "could not configure rx private waterline %d\n",
2287                                 ret);
2288                         goto out;
2289                 }
2290
2291                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2292                 if (ret) {
2293                         dev_err(&hdev->pdev->dev,
2294                                 "could not configure common threshold %d\n",
2295                                 ret);
2296                         goto out;
2297                 }
2298         }
2299
2300         ret = hclge_common_wl_config(hdev, pkt_buf);
2301         if (ret)
2302                 dev_err(&hdev->pdev->dev,
2303                         "could not configure common waterline %d\n", ret);
2304
2305 out:
2306         kfree(pkt_buf);
2307         return ret;
2308 }
2309
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2311 {
2312         struct hnae3_handle *roce = &vport->roce;
2313         struct hnae3_handle *nic = &vport->nic;
2314
2315         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2316
2317         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318             vport->back->num_msi_left == 0)
2319                 return -EINVAL;
2320
2321         roce->rinfo.base_vector = vport->back->roce_base_vector;
2322
2323         roce->rinfo.netdev = nic->kinfo.netdev;
2324         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2325
2326         roce->pdev = nic->pdev;
2327         roce->ae_algo = nic->ae_algo;
2328         roce->numa_node_mask = nic->numa_node_mask;
2329
2330         return 0;
2331 }
2332
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2334 {
2335         struct pci_dev *pdev = hdev->pdev;
2336         int vectors;
2337         int i;
2338
2339         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2340                                         hdev->num_msi,
2341                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2342         if (vectors < 0) {
2343                 dev_err(&pdev->dev,
2344                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2345                         vectors);
2346                 return vectors;
2347         }
2348         if (vectors < hdev->num_msi)
2349                 dev_warn(&hdev->pdev->dev,
2350                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351                          hdev->num_msi, vectors);
2352
2353         hdev->num_msi = vectors;
2354         hdev->num_msi_left = vectors;
2355
2356         hdev->base_msi_vector = pdev->irq;
2357         hdev->roce_base_vector = hdev->base_msi_vector +
2358                                 hdev->roce_base_msix_offset;
2359
2360         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361                                            sizeof(u16), GFP_KERNEL);
2362         if (!hdev->vector_status) {
2363                 pci_free_irq_vectors(pdev);
2364                 return -ENOMEM;
2365         }
2366
2367         for (i = 0; i < hdev->num_msi; i++)
2368                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2369
2370         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371                                         sizeof(int), GFP_KERNEL);
2372         if (!hdev->vector_irq) {
2373                 pci_free_irq_vectors(pdev);
2374                 return -ENOMEM;
2375         }
2376
2377         return 0;
2378 }
2379
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2381 {
2382         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383                 duplex = HCLGE_MAC_FULL;
2384
2385         return duplex;
2386 }
2387
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2389                                       u8 duplex)
2390 {
2391         struct hclge_config_mac_speed_dup_cmd *req;
2392         struct hclge_desc desc;
2393         int ret;
2394
2395         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2396
2397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2398
2399         if (duplex)
2400                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2401
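        /* The HCLGE_CFG_SPEED field uses a small enum rather than Mbps:
         * 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7,
         * exactly as encoded by the cases below.
         */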
2402         switch (speed) {
2403         case HCLGE_MAC_SPEED_10M:
2404                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405                                 HCLGE_CFG_SPEED_S, 6);
2406                 break;
2407         case HCLGE_MAC_SPEED_100M:
2408                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409                                 HCLGE_CFG_SPEED_S, 7);
2410                 break;
2411         case HCLGE_MAC_SPEED_1G:
2412                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413                                 HCLGE_CFG_SPEED_S, 0);
2414                 break;
2415         case HCLGE_MAC_SPEED_10G:
2416                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417                                 HCLGE_CFG_SPEED_S, 1);
2418                 break;
2419         case HCLGE_MAC_SPEED_25G:
2420                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421                                 HCLGE_CFG_SPEED_S, 2);
2422                 break;
2423         case HCLGE_MAC_SPEED_40G:
2424                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425                                 HCLGE_CFG_SPEED_S, 3);
2426                 break;
2427         case HCLGE_MAC_SPEED_50G:
2428                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429                                 HCLGE_CFG_SPEED_S, 4);
2430                 break;
2431         case HCLGE_MAC_SPEED_100G:
2432                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433                                 HCLGE_CFG_SPEED_S, 5);
2434                 break;
2435         default:
2436                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2437                 return -EINVAL;
2438         }
2439
2440         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2441                       1);
2442
2443         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2444         if (ret) {
2445                 dev_err(&hdev->pdev->dev,
2446                         "mac speed/duplex config cmd failed %d.\n", ret);
2447                 return ret;
2448         }
2449
2450         return 0;
2451 }
2452
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2454 {
2455         int ret;
2456
2457         duplex = hclge_check_speed_dup(duplex, speed);
2458         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2459                 return 0;
2460
2461         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2462         if (ret)
2463                 return ret;
2464
2465         hdev->hw.mac.speed = speed;
2466         hdev->hw.mac.duplex = duplex;
2467
2468         return 0;
2469 }
2470
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2472                                      u8 duplex)
2473 {
2474         struct hclge_vport *vport = hclge_get_vport(handle);
2475         struct hclge_dev *hdev = vport->back;
2476
2477         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2478 }
2479
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2481 {
2482         struct hclge_config_auto_neg_cmd *req;
2483         struct hclge_desc desc;
2484         u32 flag = 0;
2485         int ret;
2486
2487         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2488
2489         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2490         if (enable)
2491                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2493
2494         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2495         if (ret)
2496                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2497                         ret);
2498
2499         return ret;
2500 }
2501
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2503 {
2504         struct hclge_vport *vport = hclge_get_vport(handle);
2505         struct hclge_dev *hdev = vport->back;
2506
2507         if (!hdev->hw.mac.support_autoneg) {
2508                 if (enable) {
2509                         dev_err(&hdev->pdev->dev,
2510                                 "autoneg is not supported by current port\n");
2511                         return -EOPNOTSUPP;
2512                 } else {
2513                         return 0;
2514                 }
2515         }
2516
2517         return hclge_set_autoneg_en(hdev, enable);
2518 }
2519
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2521 {
2522         struct hclge_vport *vport = hclge_get_vport(handle);
2523         struct hclge_dev *hdev = vport->back;
2524         struct phy_device *phydev = hdev->hw.mac.phydev;
2525
2526         if (phydev)
2527                 return phydev->autoneg;
2528
2529         return hdev->hw.mac.autoneg;
2530 }
2531
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2533 {
2534         struct hclge_vport *vport = hclge_get_vport(handle);
2535         struct hclge_dev *hdev = vport->back;
2536         int ret;
2537
2538         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2539
2540         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541         if (ret)
2542                 return ret;
2543         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544 }
2545
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2547 {
2548         struct hclge_vport *vport = hclge_get_vport(handle);
2549         struct hclge_dev *hdev = vport->back;
2550
2551         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552                 return hclge_set_autoneg_en(hdev, !halt);
2553
2554         return 0;
2555 }
2556
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2558 {
2559         struct hclge_config_fec_cmd *req;
2560         struct hclge_desc desc;
2561         int ret;
2562
2563         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2564
2565         req = (struct hclge_config_fec_cmd *)desc.data;
2566         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568         if (fec_mode & BIT(HNAE3_FEC_RS))
2569                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571         if (fec_mode & BIT(HNAE3_FEC_BASER))
2572                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2574
2575         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576         if (ret)
2577                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2578
2579         return ret;
2580 }
2581
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2583 {
2584         struct hclge_vport *vport = hclge_get_vport(handle);
2585         struct hclge_dev *hdev = vport->back;
2586         struct hclge_mac *mac = &hdev->hw.mac;
2587         int ret;
2588
2589         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2591                 return -EINVAL;
2592         }
2593
2594         ret = hclge_set_fec_hw(hdev, fec_mode);
2595         if (ret)
2596                 return ret;
2597
2598         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2599         return 0;
2600 }
2601
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603                           u8 *fec_mode)
2604 {
2605         struct hclge_vport *vport = hclge_get_vport(handle);
2606         struct hclge_dev *hdev = vport->back;
2607         struct hclge_mac *mac = &hdev->hw.mac;
2608
2609         if (fec_ability)
2610                 *fec_ability = mac->fec_ability;
2611         if (fec_mode)
2612                 *fec_mode = mac->fec_mode;
2613 }
2614
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2616 {
2617         struct hclge_mac *mac = &hdev->hw.mac;
2618         int ret;
2619
2620         hdev->support_sfp_query = true;
2621         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623                                          hdev->hw.mac.duplex);
2624         if (ret) {
2625                 dev_err(&hdev->pdev->dev,
2626                         "Config mac speed dup fail ret=%d\n", ret);
2627                 return ret;
2628         }
2629
2630         if (hdev->hw.mac.support_autoneg) {
2631                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2632                 if (ret) {
2633                         dev_err(&hdev->pdev->dev,
2634                                 "Config mac autoneg fail ret=%d\n", ret);
2635                         return ret;
2636                 }
2637         }
2638
2639         mac->link = 0;
2640
2641         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2643                 if (ret) {
2644                         dev_err(&hdev->pdev->dev,
2645                                 "Fec mode init fail, ret = %d\n", ret);
2646                         return ret;
2647                 }
2648         }
2649
2650         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2651         if (ret) {
2652                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2653                 return ret;
2654         }
2655
2656         ret = hclge_set_default_loopback(hdev);
2657         if (ret)
2658                 return ret;
2659
2660         ret = hclge_buffer_alloc(hdev);
2661         if (ret)
2662                 dev_err(&hdev->pdev->dev,
2663                         "allocate buffer fail, ret=%d\n", ret);
2664
2665         return ret;
2666 }
2667
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2669 {
2670         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673                                     hclge_wq, &hdev->service_task, 0);
2674 }
2675
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2677 {
2678         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681                                     hclge_wq, &hdev->service_task, 0);
2682 }
2683
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2685 {
2686         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2687             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2688                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2689                                     hclge_wq, &hdev->service_task,
2690                                     delay_time);
2691 }
2692
2693 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2694 {
2695         struct hclge_link_status_cmd *req;
2696         struct hclge_desc desc;
2697         int link_status;
2698         int ret;
2699
2700         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2701         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2702         if (ret) {
2703                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2704                         ret);
2705                 return ret;
2706         }
2707
2708         req = (struct hclge_link_status_cmd *)desc.data;
2709         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2710
2711         return !!link_status;
2712 }
2713
2714 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2715 {
2716         unsigned int mac_state;
2717         int link_stat;
2718
2719         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2720                 return 0;
2721
2722         mac_state = hclge_get_mac_link_status(hdev);
2723
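        /* With an attached PHY the link is only reported up when both the
         * MAC link and the PHY link (with the PHY in PHY_RUNNING state)
         * are up; otherwise the MAC link status alone is used.
         */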
2724         if (hdev->hw.mac.phydev) {
2725                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2726                         link_stat = mac_state &
2727                                 hdev->hw.mac.phydev->link;
2728                 else
2729                         link_stat = 0;
2730
2731         } else {
2732                 link_stat = mac_state;
2733         }
2734
2735         return !!link_stat;
2736 }
2737
2738 static void hclge_update_link_status(struct hclge_dev *hdev)
2739 {
2740         struct hnae3_client *rclient = hdev->roce_client;
2741         struct hnae3_client *client = hdev->nic_client;
2742         struct hnae3_handle *rhandle;
2743         struct hnae3_handle *handle;
2744         int state;
2745         int i;
2746
2747         if (!client)
2748                 return;
2749
2750         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2751                 return;
2752
2753         state = hclge_get_mac_phy_link(hdev);
2754         if (state != hdev->hw.mac.link) {
2755                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2756                         handle = &hdev->vport[i].nic;
2757                         client->ops->link_status_change(handle, state);
2758                         hclge_config_mac_tnl_int(hdev, state);
2759                         rhandle = &hdev->vport[i].roce;
2760                         if (rclient && rclient->ops->link_status_change)
2761                                 rclient->ops->link_status_change(rhandle,
2762                                                                  state);
2763                 }
2764                 hdev->hw.mac.link = state;
2765         }
2766
2767         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2768 }
2769
2770 static void hclge_update_port_capability(struct hclge_mac *mac)
2771 {
2772         /* update fec ability by speed */
2773         hclge_convert_setting_fec(mac);
2774
2775         /* firmware cannot identify the backplane type; the media type
2776          * read from the configuration helps to determine it
2777          */
2778         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2779             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2780                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2781         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2782                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2783
2784         if (mac->support_autoneg) {
2785                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2786                 linkmode_copy(mac->advertising, mac->supported);
2787         } else {
2788                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2789                                    mac->supported);
2790                 linkmode_zero(mac->advertising);
2791         }
2792 }
2793
2794 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2795 {
2796         struct hclge_sfp_info_cmd *resp;
2797         struct hclge_desc desc;
2798         int ret;
2799
2800         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2801         resp = (struct hclge_sfp_info_cmd *)desc.data;
2802         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2803         if (ret == -EOPNOTSUPP) {
2804                 dev_warn(&hdev->pdev->dev,
2805                          "IMP does not support get SFP speed %d\n", ret);
2806                 return ret;
2807         } else if (ret) {
2808                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2809                 return ret;
2810         }
2811
2812         *speed = le32_to_cpu(resp->speed);
2813
2814         return 0;
2815 }
2816
2817 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2818 {
2819         struct hclge_sfp_info_cmd *resp;
2820         struct hclge_desc desc;
2821         int ret;
2822
2823         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2824         resp = (struct hclge_sfp_info_cmd *)desc.data;
2825
2826         resp->query_type = QUERY_ACTIVE_SPEED;
2827
2828         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2829         if (ret == -EOPNOTSUPP) {
2830                 dev_warn(&hdev->pdev->dev,
2831                          "IMP does not support get SFP info %d\n", ret);
2832                 return ret;
2833         } else if (ret) {
2834                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2835                 return ret;
2836         }
2837
2838         /* In some cases, the mac speed got from IMP may be 0, it shouldn't be
2839          * set to mac->speed.
2840          */
2841         if (!le32_to_cpu(resp->speed))
2842                 return 0;
2843
2844         mac->speed = le32_to_cpu(resp->speed);
2845         /* if resp->speed_ability is 0, it means it's an old version of
2846          * firmware, so do not update these params
2847          */
2848         if (resp->speed_ability) {
2849                 mac->module_type = le32_to_cpu(resp->module_type);
2850                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2851                 mac->autoneg = resp->autoneg;
2852                 mac->support_autoneg = resp->autoneg_ability;
2853                 mac->speed_type = QUERY_ACTIVE_SPEED;
2854                 if (!resp->active_fec)
2855                         mac->fec_mode = 0;
2856                 else
2857                         mac->fec_mode = BIT(resp->active_fec);
2858         } else {
2859                 mac->speed_type = QUERY_SFP_SPEED;
2860         }
2861
2862         return 0;
2863 }
2864
2865 static int hclge_update_port_info(struct hclge_dev *hdev)
2866 {
2867         struct hclge_mac *mac = &hdev->hw.mac;
2868         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2869         int ret;
2870
2871         /* get the port info from SFP cmd if not copper port */
2872         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2873                 return 0;
2874
2875         /* if IMP does not support getting SFP/qSFP info, return directly */
2876         if (!hdev->support_sfp_query)
2877                 return 0;
2878
2879         if (hdev->pdev->revision >= 0x21)
2880                 ret = hclge_get_sfp_info(hdev, mac);
2881         else
2882                 ret = hclge_get_sfp_speed(hdev, &speed);
2883
2884         if (ret == -EOPNOTSUPP) {
2885                 hdev->support_sfp_query = false;
2886                 return ret;
2887         } else if (ret) {
2888                 return ret;
2889         }
2890
2891         if (hdev->pdev->revision >= 0x21) {
2892                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2893                         hclge_update_port_capability(mac);
2894                         return 0;
2895                 }
2896                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2897                                                HCLGE_MAC_FULL);
2898         } else {
2899                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2900                         return 0; /* do nothing if no SFP */
2901
2902                 /* must config full duplex for SFP */
2903                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2904         }
2905 }
2906
2907 static int hclge_get_status(struct hnae3_handle *handle)
2908 {
2909         struct hclge_vport *vport = hclge_get_vport(handle);
2910         struct hclge_dev *hdev = vport->back;
2911
2912         hclge_update_link_status(hdev);
2913
2914         return hdev->hw.mac.link;
2915 }
2916
2917 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2918 {
2919         if (pci_num_vf(hdev->pdev) == 0) {
2920                 dev_err(&hdev->pdev->dev,
2921                         "SRIOV is disabled, cannot get vport(%d) info.\n", vf);
2922                 return NULL;
2923         }
2924
2925         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2926                 dev_err(&hdev->pdev->dev,
2927                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2928                         vf, pci_num_vf(hdev->pdev));
2929                 return NULL;
2930         }
2931
2932         /* VFs start from 1 in vport */
2933         vf += HCLGE_VF_VPORT_START_NUM;
2934         return &hdev->vport[vf];
2935 }
2936
2937 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2938                                struct ifla_vf_info *ivf)
2939 {
2940         struct hclge_vport *vport = hclge_get_vport(handle);
2941         struct hclge_dev *hdev = vport->back;
2942
2943         vport = hclge_get_vf_vport(hdev, vf);
2944         if (!vport)
2945                 return -EINVAL;
2946
2947         ivf->vf = vf;
2948         ivf->linkstate = vport->vf_info.link_state;
2949         ivf->spoofchk = vport->vf_info.spoofchk;
2950         ivf->trusted = vport->vf_info.trusted;
2951         ivf->min_tx_rate = 0;
2952         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2953         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2954         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2955         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2956         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2957
2958         return 0;
2959 }
2960
2961 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2962                                    int link_state)
2963 {
2964         struct hclge_vport *vport = hclge_get_vport(handle);
2965         struct hclge_dev *hdev = vport->back;
2966
2967         vport = hclge_get_vf_vport(hdev, vf);
2968         if (!vport)
2969                 return -EINVAL;
2970
2971         vport->vf_info.link_state = link_state;
2972
2973         return 0;
2974 }
2975
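/* Decode which vector0 event fired by reading the reset, MSI-X and CMDQ
 * interrupt source registers. Reset events are handled with the highest
 * priority, then MSI-X errors, then mailbox events. *clearval is set to the
 * bits the caller should clear for the returned event type.
 */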
2976 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2977 {
2978         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2979
2980         /* fetch the events from their corresponding regs */
2981         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2982         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2983         msix_src_reg = hclge_read_dev(&hdev->hw,
2984                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2985
2986         /* Assumption: If by any chance reset and mailbox events are reported
2987          * together then we will only process the reset event in this go and
2988          * defer the processing of the mailbox events. Since we would not have
2989          * cleared the RX CMDQ event this time, we would receive another
2990          * interrupt from H/W just for the mailbox.
2991          *
2992          * check for vector0 reset event sources
2993          */
2994         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2995                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2996                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2997                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2998                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2999                 hdev->rst_stats.imp_rst_cnt++;
3000                 return HCLGE_VECTOR0_EVENT_RST;
3001         }
3002
3003         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
3004                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3005                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3006                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3007                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3008                 hdev->rst_stats.global_rst_cnt++;
3009                 return HCLGE_VECTOR0_EVENT_RST;
3010         }
3011
3012         /* check for vector0 msix event source */
3013         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3014                 *clearval = msix_src_reg;
3015                 return HCLGE_VECTOR0_EVENT_ERR;
3016         }
3017
3018         /* check for vector0 mailbox(=CMDQ RX) event source */
3019         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3020                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3021                 *clearval = cmdq_src_reg;
3022                 return HCLGE_VECTOR0_EVENT_MBX;
3023         }
3024
3025         /* print other vector0 event source */
3026         dev_info(&hdev->pdev->dev,
3027                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3028                  cmdq_src_reg, msix_src_reg);
3029         *clearval = msix_src_reg;
3030
3031         return HCLGE_VECTOR0_EVENT_OTHER;
3032 }
3033
3034 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3035                                     u32 regclr)
3036 {
3037         switch (event_type) {
3038         case HCLGE_VECTOR0_EVENT_RST:
3039                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3040                 break;
3041         case HCLGE_VECTOR0_EVENT_MBX:
3042                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3043                 break;
3044         default:
3045                 break;
3046         }
3047 }
3048
3049 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3050 {
3051         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3052                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3053                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3054                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3055         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3056 }
3057
3058 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3059 {
3060         writel(enable ? 1 : 0, vector->addr);
3061 }
3062
3063 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3064 {
3065         struct hclge_dev *hdev = data;
3066         u32 clearval = 0;
3067         u32 event_cause;
3068
3069         hclge_enable_vector(&hdev->misc_vector, false);
3070         event_cause = hclge_check_event_cause(hdev, &clearval);
3071
3072         /* vector 0 interrupt is shared with reset and mailbox source events. */
3073         switch (event_cause) {
3074         case HCLGE_VECTOR0_EVENT_ERR:
3075                 /* we do not know what type of reset is required now. This could
3076                  * only be decided after we fetch the type of errors which
3077                  * caused this event. Therefore, we will do the below for now:
3078                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3079                  *    have deferred the type of reset to be used.
3080                  * 2. Schedule the reset service task.
3081                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3082                  *    will fetch the correct type of reset. This would be done
3083                  *    by first decoding the types of errors.
3084                  */
3085                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3086                 /* fall through */
3087         case HCLGE_VECTOR0_EVENT_RST:
3088                 hclge_reset_task_schedule(hdev);
3089                 break;
3090         case HCLGE_VECTOR0_EVENT_MBX:
3091                 /* If we are here then either:
3092                  * 1. We are not handling any mbx task and we are not
3093                  *    scheduled as well,
3094                  *                        OR
3095                  * 2. We could be handling an mbx task but nothing more is
3096                  *    scheduled.
3097                  * In both cases, we should schedule the mbx task as there are
3098                  * more mbx messages reported by this interrupt.
3099                  */
3100                 hclge_mbx_task_schedule(hdev);
3101                 break;
3102         default:
3103                 dev_warn(&hdev->pdev->dev,
3104                          "received unknown or unhandled event of vector0\n");
3105                 break;
3106         }
3107
3108         hclge_clear_event_cause(hdev, event_cause, clearval);
3109
3110         /* Enable interrupt if it is not caused by reset. And when
3111          * clearval equals 0, it means the interrupt status may have been
3112          * cleared by hardware before the driver reads the status register.
3113          * In this case, the vector0 interrupt should also be enabled.
3114          */
3115         if (!clearval ||
3116             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3117                 hclge_enable_vector(&hdev->misc_vector, true);
3118         }
3119
3120         return IRQ_HANDLED;
3121 }
3122
3123 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3124 {
3125         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3126                 dev_warn(&hdev->pdev->dev,
3127                          "vector(vector_id %d) has been freed.\n", vector_id);
3128                 return;
3129         }
3130
3131         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3132         hdev->num_msi_left += 1;
3133         hdev->num_msi_used -= 1;
3134 }
3135
3136 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3137 {
3138         struct hclge_misc_vector *vector = &hdev->misc_vector;
3139
3140         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3141
3142         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3143         hdev->vector_status[0] = 0;
3144
3145         hdev->num_msi_left -= 1;
3146         hdev->num_msi_used += 1;
3147 }
3148
3149 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3150                                       const cpumask_t *mask)
3151 {
3152         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3153                                               affinity_notify);
3154
3155         cpumask_copy(&hdev->affinity_mask, mask);
3156 }
3157
3158 static void hclge_irq_affinity_release(struct kref *ref)
3159 {
3160 }
3161
3162 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3163 {
3164         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3165                               &hdev->affinity_mask);
3166
3167         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3168         hdev->affinity_notify.release = hclge_irq_affinity_release;
3169         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3170                                   &hdev->affinity_notify);
3171 }
3172
3173 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3174 {
3175         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3176         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3177 }
3178
3179 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3180 {
3181         int ret;
3182
3183         hclge_get_misc_vector(hdev);
3184
3185         /* this would be explicitly freed in the end */
3186         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3187                  HCLGE_NAME, pci_name(hdev->pdev));
3188         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3189                           0, hdev->misc_vector.name, hdev);
3190         if (ret) {
3191                 hclge_free_vector(hdev, 0);
3192                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3193                         hdev->misc_vector.vector_irq);
3194         }
3195
3196         return ret;
3197 }
3198
3199 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3200 {
3201         free_irq(hdev->misc_vector.vector_irq, hdev);
3202         hclge_free_vector(hdev, 0);
3203 }
3204
3205 int hclge_notify_client(struct hclge_dev *hdev,
3206                         enum hnae3_reset_notify_type type)
3207 {
3208         struct hnae3_client *client = hdev->nic_client;
3209         u16 i;
3210
3211         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3212                 return 0;
3213
3214         if (!client->ops->reset_notify)
3215                 return -EOPNOTSUPP;
3216
3217         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3218                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3219                 int ret;
3220
3221                 ret = client->ops->reset_notify(handle, type);
3222                 if (ret) {
3223                         dev_err(&hdev->pdev->dev,
3224                                 "notify nic client failed %d(%d)\n", type, ret);
3225                         return ret;
3226                 }
3227         }
3228
3229         return 0;
3230 }
3231
3232 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3233                                     enum hnae3_reset_notify_type type)
3234 {
3235         struct hnae3_client *client = hdev->roce_client;
3236         int ret = 0;
3237         u16 i;
3238
3239         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3240                 return 0;
3241
3242         if (!client->ops->reset_notify)
3243                 return -EOPNOTSUPP;
3244
3245         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3246                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3247
3248                 ret = client->ops->reset_notify(handle, type);
3249                 if (ret) {
3250                         dev_err(&hdev->pdev->dev,
3251                                 "notify roce client failed %d(%d)",
3252                                 type, ret);
3253                         return ret;
3254                 }
3255         }
3256
3257         return ret;
3258 }
3259
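/* Wait for the current reset type to complete by polling the corresponding
 * hardware status register (or the FLR done flag), sleeping 100 ms between
 * polls. Returns -EBUSY if the reset does not finish in time.
 */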
3260 static int hclge_reset_wait(struct hclge_dev *hdev)
3261 {
3262 #define HCLGE_RESET_WAIT_MS     100
3263 #define HCLGE_RESET_WAIT_CNT    200
3264         u32 val, reg, reg_bit;
3265         u32 cnt = 0;
3266
3267         switch (hdev->reset_type) {
3268         case HNAE3_IMP_RESET:
3269                 reg = HCLGE_GLOBAL_RESET_REG;
3270                 reg_bit = HCLGE_IMP_RESET_BIT;
3271                 break;
3272         case HNAE3_GLOBAL_RESET:
3273                 reg = HCLGE_GLOBAL_RESET_REG;
3274                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3275                 break;
3276         case HNAE3_FUNC_RESET:
3277                 reg = HCLGE_FUN_RST_ING;
3278                 reg_bit = HCLGE_FUN_RST_ING_B;
3279                 break;
3280         case HNAE3_FLR_RESET:
3281                 break;
3282         default:
3283                 dev_err(&hdev->pdev->dev,
3284                         "Wait for unsupported reset type: %d\n",
3285                         hdev->reset_type);
3286                 return -EINVAL;
3287         }
3288
3289         if (hdev->reset_type == HNAE3_FLR_RESET) {
3290                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3291                        cnt++ < HCLGE_RESET_WAIT_CNT)
3292                         msleep(HCLGE_RESET_WAIT_MS);
3293
3294                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3295                         dev_err(&hdev->pdev->dev,
3296                                 "flr wait timeout: %u\n", cnt);
3297                         return -EBUSY;
3298                 }
3299
3300                 return 0;
3301         }
3302
3303         val = hclge_read_dev(&hdev->hw, reg);
3304         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3305                 msleep(HCLGE_RESET_WAIT_MS);
3306                 val = hclge_read_dev(&hdev->hw, reg);
3307                 cnt++;
3308         }
3309
3310         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3311                 dev_warn(&hdev->pdev->dev,
3312                          "Wait for reset timeout: %d\n", hdev->reset_type);
3313                 return -EBUSY;
3314         }
3315
3316         return 0;
3317 }
3318
3319 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3320 {
3321         struct hclge_vf_rst_cmd *req;
3322         struct hclge_desc desc;
3323
3324         req = (struct hclge_vf_rst_cmd *)desc.data;
3325         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3326         req->dest_vfid = func_id;
3327
3328         if (reset)
3329                 req->vf_rst = 0x1;
3330
3331         return hclge_cmd_send(&hdev->hw, &desc, 1);
3332 }
3333
3334 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3335 {
3336         int i;
3337
3338         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3339                 struct hclge_vport *vport = &hdev->vport[i];
3340                 int ret;
3341
3342                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3343                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3344                 if (ret) {
3345                         dev_err(&hdev->pdev->dev,
3346                                 "set vf(%u) rst failed %d!\n",
3347                                 vport->vport_id, ret);
3348                         return ret;
3349                 }
3350
3351                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3352                         continue;
3353
3354                 /* Inform VF to process the reset.
3355                  * hclge_inform_reset_assert_to_vf may fail if VF
3356                  * driver is not loaded.
3357                  */
3358                 ret = hclge_inform_reset_assert_to_vf(vport);
3359                 if (ret)
3360                         dev_warn(&hdev->pdev->dev,
3361                                  "inform reset to vf(%u) failed %d!\n",
3362                                  vport->vport_id, ret);
3363         }
3364
3365         return 0;
3366 }
3367
3368 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3369 {
3370         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3371             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3372             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3373                 return;
3374
3375         hclge_mbx_handler(hdev);
3376
3377         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3378 }
3379
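/* Poll the firmware until every VF reports that it is ready for the function
 * reset, servicing pending mailbox messages between polls so VFs can bring
 * their netdevs down. Returns -ETIME if the VFs never become ready.
 */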
3380 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3381 {
3382         struct hclge_pf_rst_sync_cmd *req;
3383         struct hclge_desc desc;
3384         int cnt = 0;
3385         int ret;
3386
3387         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3388         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3389
3390         do {
3391                 /* vf needs to down the netdev by mbx during PF or FLR reset */
3392                 hclge_mailbox_service_task(hdev);
3393
3394                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3395                 /* to be compatible with old firmware, wait
3396                  * 100 ms for VF to stop IO
3397                  */
3398                 if (ret == -EOPNOTSUPP) {
3399                         msleep(HCLGE_RESET_SYNC_TIME);
3400                         return 0;
3401                 } else if (ret) {
3402                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3403                                 ret);
3404                         return ret;
3405                 } else if (req->all_vf_ready) {
3406                         return 0;
3407                 }
3408                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3409                 hclge_cmd_reuse_desc(&desc, true);
3410         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3411
3412         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3413         return -ETIME;
3414 }
3415
3416 void hclge_report_hw_error(struct hclge_dev *hdev,
3417                            enum hnae3_hw_error_type type)
3418 {
3419         struct hnae3_client *client = hdev->nic_client;
3420         u16 i;
3421
3422         if (!client || !client->ops->process_hw_error ||
3423             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3424                 return;
3425
3426         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3427                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3428 }
3429
3430 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3431 {
3432         u32 reg_val;
3433
3434         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3435         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3436                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3437                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3438                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3439         }
3440
3441         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3442                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3443                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3444                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3445         }
3446 }
3447
3448 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3449 {
3450         struct hclge_desc desc;
3451         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3452         int ret;
3453
3454         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3455         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3456         req->fun_reset_vfid = func_id;
3457
3458         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3459         if (ret)
3460                 dev_err(&hdev->pdev->dev,
3461                         "send function reset cmd fail, status = %d\n", ret);
3462
3463         return ret;
3464 }
3465
3466 static void hclge_do_reset(struct hclge_dev *hdev)
3467 {
3468         struct hnae3_handle *handle = &hdev->vport[0].nic;
3469         struct pci_dev *pdev = hdev->pdev;
3470         u32 val;
3471
3472         if (hclge_get_hw_reset_stat(handle)) {
3473                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3474                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3475                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3476                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3477                 return;
3478         }
3479
3480         switch (hdev->reset_type) {
3481         case HNAE3_GLOBAL_RESET:
3482                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3483                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3484                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3485                 dev_info(&pdev->dev, "Global Reset requested\n");
3486                 break;
3487         case HNAE3_FUNC_RESET:
3488                 dev_info(&pdev->dev, "PF Reset requested\n");
3489                 /* schedule again to check later */
3490                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3491                 hclge_reset_task_schedule(hdev);
3492                 break;
3493         case HNAE3_FLR_RESET:
3494                 dev_info(&pdev->dev, "FLR requested\n");
3495                 /* schedule again to check later */
3496                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3497                 hclge_reset_task_schedule(hdev);
3498                 break;
3499         default:
3500                 dev_warn(&pdev->dev,
3501                          "Unsupported reset type: %d\n", hdev->reset_type);
3502                 break;
3503         }
3504 }
3505
3506 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3507                                                    unsigned long *addr)
3508 {
3509         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3510         struct hclge_dev *hdev = ae_dev->priv;
3511
3512         /* first, resolve any unknown reset type to the known type(s) */
3513         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3514                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3515                                         HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3516                 /* we will intentionally ignore any errors from this function
3517                  * as we will end up in *some* reset request in any case
3518                  */
3519                 if (hclge_handle_hw_msix_error(hdev, addr))
3520                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3521                                  msix_sts_reg);
3522
3523                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3524                 /* We deferred the clearing of the error event which caused the
3525                  * interrupt since it was not possible to do that in
3526                  * interrupt context (and this is the reason we introduced the
3527                  * new UNKNOWN reset type). Now that the errors have been
3528                  * handled and cleared in hardware, we can safely enable
3529                  * interrupts. This is an exception to the norm.
3530                  */
3531                 hclge_enable_vector(&hdev->misc_vector, true);
3532         }
3533
3534         /* return the highest priority reset level amongst all */
3535         if (test_bit(HNAE3_IMP_RESET, addr)) {
3536                 rst_level = HNAE3_IMP_RESET;
3537                 clear_bit(HNAE3_IMP_RESET, addr);
3538                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3539                 clear_bit(HNAE3_FUNC_RESET, addr);
3540         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3541                 rst_level = HNAE3_GLOBAL_RESET;
3542                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3543                 clear_bit(HNAE3_FUNC_RESET, addr);
3544         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3545                 rst_level = HNAE3_FUNC_RESET;
3546                 clear_bit(HNAE3_FUNC_RESET, addr);
3547         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3548                 rst_level = HNAE3_FLR_RESET;
3549                 clear_bit(HNAE3_FLR_RESET, addr);
3550         }
3551
3552         if (hdev->reset_type != HNAE3_NONE_RESET &&
3553             rst_level < hdev->reset_type)
3554                 return HNAE3_NONE_RESET;
3555
3556         return rst_level;
3557 }
3558
3559 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3560 {
3561         u32 clearval = 0;
3562
3563         switch (hdev->reset_type) {
3564         case HNAE3_IMP_RESET:
3565                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3566                 break;
3567         case HNAE3_GLOBAL_RESET:
3568                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3569                 break;
3570         default:
3571                 break;
3572         }
3573
3574         if (!clearval)
3575                 return;
3576
3577         /* For revision 0x20, the reset interrupt source
3578          * can only be cleared after the hardware reset is done
3579          */
3580         if (hdev->pdev->revision == 0x20)
3581                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3582                                 clearval);
3583
3584         hclge_enable_vector(&hdev->misc_vector, true);
3585 }
3586
3587 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3588 {
3589         int ret = 0;
3590
3591         switch (hdev->reset_type) {
3592         case HNAE3_FUNC_RESET:
3593                 /* fall through */
3594         case HNAE3_FLR_RESET:
3595                 ret = hclge_set_all_vf_rst(hdev, true);
3596                 break;
3597         default:
3598                 break;
3599         }
3600
3601         return ret;
3602 }
3603
3604 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3605 {
3606         u32 reg_val;
3607
3608         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3609         if (enable)
3610                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3611         else
3612                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3613
3614         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3615 }
3616
3617 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3618 {
3619         u32 reg_val;
3620         int ret = 0;
3621
3622         switch (hdev->reset_type) {
3623         case HNAE3_FUNC_RESET:
3624                 /* to confirm whether all running VFs are ready
3625                  * before requesting PF reset
3626                  */
3627                 ret = hclge_func_reset_sync_vf(hdev);
3628                 if (ret)
3629                         return ret;
3630
3631                 ret = hclge_func_reset_cmd(hdev, 0);
3632                 if (ret) {
3633                         dev_err(&hdev->pdev->dev,
3634                                 "asserting function reset fail %d!\n", ret);
3635                         return ret;
3636                 }
3637
3638                 /* After performing PF reset, it is not necessary to do the
3639                  * mailbox handling or send any command to firmware, because
3640                  * any mailbox handling or command to firmware is only valid
3641                  * after hclge_cmd_init is called.
3642                  */
3643                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3644                 hdev->rst_stats.pf_rst_cnt++;
3645                 break;
3646         case HNAE3_FLR_RESET:
3647                 /* to confirm whether all running VFs are ready
3648                  * before requesting PF reset
3649                  */
3650                 ret = hclge_func_reset_sync_vf(hdev);
3651                 if (ret)
3652                         return ret;
3653
3654                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3655                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3656                 hdev->rst_stats.flr_rst_cnt++;
3657                 break;
3658         case HNAE3_IMP_RESET:
3659                 hclge_handle_imp_error(hdev);
3660                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3661                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3662                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3663                 break;
3664         default:
3665                 break;
3666         }
3667
3668         /* inform hardware that preparatory work is done */
3669         msleep(HCLGE_RESET_SYNC_TIME);
3670         hclge_reset_handshake(hdev, true);
3671         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3672
3673         return ret;
3674 }
3675
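/* Called after a reset attempt fails. Returns true if the reset should be
 * retried (another reset is still pending, or the failure count is below
 * MAX_RESET_FAIL_CNT and the same reset type is re-queued); returns false
 * when a new reset interrupt is latched or the retry limit is exceeded.
 */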
3676 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3677 {
3678 #define MAX_RESET_FAIL_CNT 5
3679
3680         if (hdev->reset_pending) {
3681                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3682                          hdev->reset_pending);
3683                 return true;
3684         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3685                    HCLGE_RESET_INT_M) {
3686                 dev_info(&hdev->pdev->dev,
3687                          "reset failed because of new reset interrupt\n");
3688                 hclge_clear_reset_cause(hdev);
3689                 return false;
3690         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3691                 hdev->rst_stats.reset_fail_cnt++;
3692                 set_bit(hdev->reset_type, &hdev->reset_pending);
3693                 dev_info(&hdev->pdev->dev,
3694                          "re-schedule reset task(%u)\n",
3695                          hdev->rst_stats.reset_fail_cnt);
3696                 return true;
3697         }
3698
3699         hclge_clear_reset_cause(hdev);
3700
3701         /* recover the handshake status when reset fail */
3702         hclge_reset_handshake(hdev, true);
3703
3704         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3705
3706         hclge_dbg_dump_rst_info(hdev);
3707
3708         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3709
3710         return false;
3711 }
3712
3713 static int hclge_set_rst_done(struct hclge_dev *hdev)
3714 {
3715         struct hclge_pf_rst_done_cmd *req;
3716         struct hclge_desc desc;
3717         int ret;
3718
3719         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3720         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3721         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3722
3723         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3724         /* To be compatible with the old firmware, which does not support
3725          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3726          * return success
3727          */
3728         if (ret == -EOPNOTSUPP) {
3729                 dev_warn(&hdev->pdev->dev,
3730                          "current firmware does not support command(0x%x)!\n",
3731                          HCLGE_OPC_PF_RST_DONE);
3732                 return 0;
3733         } else if (ret) {
3734                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3735                         ret);
3736         }
3737
3738         return ret;
3739 }
3740
3741 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3742 {
3743         int ret = 0;
3744
3745         switch (hdev->reset_type) {
3746         case HNAE3_FUNC_RESET:
3747                 /* fall through */
3748         case HNAE3_FLR_RESET:
3749                 ret = hclge_set_all_vf_rst(hdev, false);
3750                 break;
3751         case HNAE3_GLOBAL_RESET:
3752                 /* fall through */
3753         case HNAE3_IMP_RESET:
3754                 ret = hclge_set_rst_done(hdev);
3755                 break;
3756         default:
3757                 break;
3758         }
3759
3760         /* clear up the handshake status after re-initialization is done */
3761         hclge_reset_handshake(hdev, false);
3762
3763         return ret;
3764 }
3765
3766 static int hclge_reset_stack(struct hclge_dev *hdev)
3767 {
3768         int ret;
3769
3770         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3771         if (ret)
3772                 return ret;
3773
3774         ret = hclge_reset_ae_dev(hdev->ae_dev);
3775         if (ret)
3776                 return ret;
3777
3778         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3779         if (ret)
3780                 return ret;
3781
3782         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3783 }
3784
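/* Top-level reset sequence: notify the clients to go down, prepare for and
 * trigger the hardware reset, wait for it to complete, then rebuild the
 * stack and bring the clients back up. On failure the error path lets
 * hclge_reset_err_handle() decide whether to re-schedule the reset.
 */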
3785 static void hclge_reset(struct hclge_dev *hdev)
3786 {
3787         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3788         enum hnae3_reset_type reset_level;
3789         int ret;
3790
3791         /* Initialize ae_dev reset status as well, in case the enet layer wants
3792          * to know if the device is undergoing reset
3793          */
3794         ae_dev->reset_type = hdev->reset_type;
3795         hdev->rst_stats.reset_cnt++;
3796         /* perform reset of the stack & ae device for a client */
3797         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3798         if (ret)
3799                 goto err_reset;
3800
3801         ret = hclge_reset_prepare_down(hdev);
3802         if (ret)
3803                 goto err_reset;
3804
3805         rtnl_lock();
3806         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3807         if (ret)
3808                 goto err_reset_lock;
3809
3810         rtnl_unlock();
3811
3812         ret = hclge_reset_prepare_wait(hdev);
3813         if (ret)
3814                 goto err_reset;
3815
3816         if (hclge_reset_wait(hdev))
3817                 goto err_reset;
3818
3819         hdev->rst_stats.hw_reset_done_cnt++;
3820
3821         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3822         if (ret)
3823                 goto err_reset;
3824
3825         rtnl_lock();
3826
3827         ret = hclge_reset_stack(hdev);
3828         if (ret)
3829                 goto err_reset_lock;
3830
3831         hclge_clear_reset_cause(hdev);
3832
3833         ret = hclge_reset_prepare_up(hdev);
3834         if (ret)
3835                 goto err_reset_lock;
3836
3837         rtnl_unlock();
3838
3839         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3840         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3841          * times
3842          */
3843         if (ret &&
3844             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3845                 goto err_reset;
3846
3847         rtnl_lock();
3848
3849         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3850         if (ret)
3851                 goto err_reset_lock;
3852
3853         rtnl_unlock();
3854
3855         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3856         if (ret)
3857                 goto err_reset;
3858
3859         hdev->last_reset_time = jiffies;
3860         hdev->rst_stats.reset_fail_cnt = 0;
3861         hdev->rst_stats.reset_done_cnt++;
3862         ae_dev->reset_type = HNAE3_NONE_RESET;
3863         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3864
3865         /* if default_reset_request has a higher level reset request,
3866          * it should be handled as soon as possible, since some errors
3867          * need this kind of reset to be fixed.
3868          */
3869         reset_level = hclge_get_reset_level(ae_dev,
3870                                             &hdev->default_reset_request);
3871         if (reset_level != HNAE3_NONE_RESET)
3872                 set_bit(reset_level, &hdev->reset_request);
3873
3874         return;
3875
3876 err_reset_lock:
3877         rtnl_unlock();
3878 err_reset:
3879         if (hclge_reset_err_handle(hdev))
3880                 hclge_reset_task_schedule(hdev);
3881 }
3882
3883 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3884 {
3885         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3886         struct hclge_dev *hdev = ae_dev->priv;
3887
3888         /* We might end up getting called broadly because of the 2 cases below:
3889          * 1. A recoverable error was conveyed through APEI and the only way
3890          *    to bring back normalcy is to reset.
3891          * 2. A new reset request from the stack due to timeout
3892          *
3893          * For the first case, the error event might not have an ae handle
3894          * available. Check if this is a new reset request and we are not here
3895          * just because the last reset attempt did not succeed and the watchdog
3896          * hit us again. We will know this if the last reset request did not
3897          * occur very recently (watchdog timer = 5*HZ, let us check after a
3898          * sufficiently large time, say 4*5*HZ). In case of a new request we
3899          * reset the "reset level" to PF reset. And if it is a repeat reset
3900          * request of the most recent one then we want to throttle it.
3901          * Therefore, we will not allow it again before 3*HZ times.
3902          */
3903         if (!handle)
3904                 handle = &hdev->vport[0].nic;
3905
3906         if (time_before(jiffies, (hdev->last_reset_time +
3907                                   HCLGE_RESET_INTERVAL))) {
3908                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3909                 return;
3910         } else if (hdev->default_reset_request) {
3911                 hdev->reset_level =
3912                         hclge_get_reset_level(ae_dev,
3913                                               &hdev->default_reset_request);
3914         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3915                 hdev->reset_level = HNAE3_FUNC_RESET;
3916         }
3917
3918         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3919                  hdev->reset_level);
3920
3921         /* request reset & schedule reset task */
3922         set_bit(hdev->reset_level, &hdev->reset_request);
3923         hclge_reset_task_schedule(hdev);
3924
3925         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3926                 hdev->reset_level++;
3927 }
3928
3929 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3930                                         enum hnae3_reset_type rst_type)
3931 {
3932         struct hclge_dev *hdev = ae_dev->priv;
3933
3934         set_bit(rst_type, &hdev->default_reset_request);
3935 }
3936
3937 static void hclge_reset_timer(struct timer_list *t)
3938 {
3939         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3940
3941         /* if default_reset_request has no value, it means that this reset
3942          * request has already been handled, so just return here
3943          */
3944         if (!hdev->default_reset_request)
3945                 return;
3946
3947         dev_info(&hdev->pdev->dev,
3948                  "triggering reset in reset timer\n");
3949         hclge_reset_event(hdev->pdev, NULL);
3950 }
3951
3952 static void hclge_reset_subtask(struct hclge_dev *hdev)
3953 {
3954         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3955
3956         /* check if there is any ongoing reset in the hardware. This status can
3957          * be checked from reset_pending. If there is, then we need to wait for
3958          * the hardware to complete the reset.
3959          *    a. If we are able to figure out in reasonable time that the
3960          *       hardware has fully reset, then we can proceed with driver and
3961          *       client reset.
3962          *    b. Else, we can come back later to check this status, so
3963          *       re-schedule now.
3964          */
3965         hdev->last_reset_time = jiffies;
3966         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3967         if (hdev->reset_type != HNAE3_NONE_RESET)
3968                 hclge_reset(hdev);
3969
3970         /* check if we got any *new* reset requests to be honored */
3971         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3972         if (hdev->reset_type != HNAE3_NONE_RESET)
3973                 hclge_do_reset(hdev);
3974
3975         hdev->reset_type = HNAE3_NONE_RESET;
3976 }
3977
3978 static void hclge_reset_service_task(struct hclge_dev *hdev)
3979 {
3980         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3981                 return;
3982
3983         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3984                 return;
3985
3986         hclge_reset_subtask(hdev);
3987
3988         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3989 }
3990
3991 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3992 {
3993         int i;
3994
3995         /* start from vport 1 since the PF is always alive */
3996         for (i = 1; i < hdev->num_alloc_vport; i++) {
3997                 struct hclge_vport *vport = &hdev->vport[i];
3998
3999                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4000                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4001
4002                 /* If vf is not alive, set to default value */
4003                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4004                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4005         }
4006 }
4007
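/* Periodic service work: the link status is refreshed on every invocation,
 * while vport aliveness, statistics, port info, VLAN filter sync and aRFS
 * expiry are rate-limited to roughly once per second before the task
 * re-schedules itself.
 */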
4008 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4009 {
4010         unsigned long delta = round_jiffies_relative(HZ);
4011
4012         /* Always handle the link updating to make sure link state is
4013          * updated when it is triggered by mbx.
4014          */
4015         hclge_update_link_status(hdev);
4016
4017         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4018                 delta = jiffies - hdev->last_serv_processed;
4019
4020                 if (delta < round_jiffies_relative(HZ)) {
4021                         delta = round_jiffies_relative(HZ) - delta;
4022                         goto out;
4023                 }
4024         }
4025
4026         hdev->serv_processed_cnt++;
4027         hclge_update_vport_alive(hdev);
4028
4029         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4030                 hdev->last_serv_processed = jiffies;
4031                 goto out;
4032         }
4033
4034         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4035                 hclge_update_stats_for_all(hdev);
4036
4037         hclge_update_port_info(hdev);
4038         hclge_sync_vlan_filter(hdev);
4039
4040         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4041                 hclge_rfs_filter_expire(hdev);
4042
4043         hdev->last_serv_processed = jiffies;
4044
4045 out:
4046         hclge_task_schedule(hdev, delta);
4047 }
4048
4049 static void hclge_service_task(struct work_struct *work)
4050 {
4051         struct hclge_dev *hdev =
4052                 container_of(work, struct hclge_dev, service_task.work);
4053
4054         hclge_reset_service_task(hdev);
4055         hclge_mailbox_service_task(hdev);
4056         hclge_periodic_service_task(hdev);
4057
4058         /* Handle reset and mbx again in case periodical task delays the
4059          * handling by calling hclge_task_schedule() in
4060          * hclge_periodic_service_task().
4061          */
4062         hclge_reset_service_task(hdev);
4063         hclge_mailbox_service_task(hdev);
4064 }
4065
4066 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4067 {
4068         /* VF handle has no client */
4069         if (!handle->client)
4070                 return container_of(handle, struct hclge_vport, nic);
4071         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4072                 return container_of(handle, struct hclge_vport, roce);
4073         else
4074                 return container_of(handle, struct hclge_vport, nic);
4075 }
4076
4077 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4078                             struct hnae3_vector_info *vector_info)
4079 {
4080         struct hclge_vport *vport = hclge_get_vport(handle);
4081         struct hnae3_vector_info *vector = vector_info;
4082         struct hclge_dev *hdev = vport->back;
4083         int alloc = 0;
4084         int i, j;
4085
4086         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4087         vector_num = min(hdev->num_msi_left, vector_num);
4088
4089         for (j = 0; j < vector_num; j++) {
4090                 for (i = 1; i < hdev->num_msi; i++) {
4091                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4092                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4093                                 vector->io_addr = hdev->hw.io_base +
4094                                         HCLGE_VECTOR_REG_BASE +
4095                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4096                                         vport->vport_id *
4097                                         HCLGE_VECTOR_VF_OFFSET;
4098                                 hdev->vector_status[i] = vport->vport_id;
4099                                 hdev->vector_irq[i] = vector->vector;
4100
4101                                 vector++;
4102                                 alloc++;
4103
4104                                 break;
4105                         }
4106                 }
4107         }
4108         hdev->num_msi_left -= alloc;
4109         hdev->num_msi_used += alloc;
4110
4111         return alloc;
4112 }
4113
4114 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4115 {
4116         int i;
4117
4118         for (i = 0; i < hdev->num_msi; i++)
4119                 if (vector == hdev->vector_irq[i])
4120                         return i;
4121
4122         return -EINVAL;
4123 }
4124
4125 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4126 {
4127         struct hclge_vport *vport = hclge_get_vport(handle);
4128         struct hclge_dev *hdev = vport->back;
4129         int vector_id;
4130
4131         vector_id = hclge_get_vector_index(hdev, vector);
4132         if (vector_id < 0) {
4133                 dev_err(&hdev->pdev->dev,
4134                         "Get vector index fail. vector_id = %d\n", vector_id);
4135                 return vector_id;
4136         }
4137
4138         hclge_free_vector(hdev, vector_id);
4139
4140         return 0;
4141 }
4142
4143 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4144 {
4145         return HCLGE_RSS_KEY_SIZE;
4146 }
4147
4148 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4149 {
4150         return HCLGE_RSS_IND_TBL_SIZE;
4151 }
4152
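/* Write the RSS hash key to hardware. The key is split into chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per chunk, with the
 * chunk index encoded in the hash_config field.
 */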
4153 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4154                                   const u8 hfunc, const u8 *key)
4155 {
4156         struct hclge_rss_config_cmd *req;
4157         unsigned int key_offset = 0;
4158         struct hclge_desc desc;
4159         int key_counts;
4160         int key_size;
4161         int ret;
4162
4163         key_counts = HCLGE_RSS_KEY_SIZE;
4164         req = (struct hclge_rss_config_cmd *)desc.data;
4165
4166         while (key_counts) {
4167                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4168                                            false);
4169
4170                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4171                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4172
4173                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4174                 memcpy(req->hash_key,
4175                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4176
4177                 key_counts -= key_size;
4178                 key_offset++;
4179                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4180                 if (ret) {
4181                         dev_err(&hdev->pdev->dev,
4182                                 "Configure RSS config fail, status = %d\n",
4183                                 ret);
4184                         return ret;
4185                 }
4186         }
4187         return 0;
4188 }
4189
4190 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4191 {
4192         struct hclge_rss_indirection_table_cmd *req;
4193         struct hclge_desc desc;
4194         int i, j;
4195         int ret;
4196
4197         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4198
4199         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4200                 hclge_cmd_setup_basic_desc
4201                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4202
4203                 req->start_table_index =
4204                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4205                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4206
4207                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4208                         req->rss_result[j] =
4209                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4210
4211                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4212                 if (ret) {
4213                         dev_err(&hdev->pdev->dev,
4214                                 "Configure rss indir table fail, status = %d\n",
4215                                 ret);
4216                         return ret;
4217                 }
4218         }
4219         return 0;
4220 }
4221
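/* Configure the per-TC RSS mode: for each TC, whether it is valid, the
 * size of its queue region and its queue offset.
 */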
4222 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4223                                  u16 *tc_size, u16 *tc_offset)
4224 {
4225         struct hclge_rss_tc_mode_cmd *req;
4226         struct hclge_desc desc;
4227         int ret;
4228         int i;
4229
4230         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4231         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4232
4233         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4234                 u16 mode = 0;
4235
4236                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4237                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4238                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4239                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4240                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4241
4242                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4243         }
4244
4245         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4246         if (ret)
4247                 dev_err(&hdev->pdev->dev,
4248                         "Configure rss tc mode fail, status = %d\n", ret);
4249
4250         return ret;
4251 }
4252
4253 static void hclge_get_rss_type(struct hclge_vport *vport)
4254 {
4255         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4256             vport->rss_tuple_sets.ipv4_udp_en ||
4257             vport->rss_tuple_sets.ipv4_sctp_en ||
4258             vport->rss_tuple_sets.ipv6_tcp_en ||
4259             vport->rss_tuple_sets.ipv6_udp_en ||
4260             vport->rss_tuple_sets.ipv6_sctp_en)
4261                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4262         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4263                  vport->rss_tuple_sets.ipv6_fragment_en)
4264                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4265         else
4266                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4267 }
4268
4269 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4270 {
4271         struct hclge_rss_input_tuple_cmd *req;
4272         struct hclge_desc desc;
4273         int ret;
4274
4275         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4276
4277         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4278
4279         /* Get the tuple cfg from pf */
4280         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4281         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4282         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4283         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4284         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4285         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4286         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4287         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4288         hclge_get_rss_type(&hdev->vport[0]);
4289         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4290         if (ret)
4291                 dev_err(&hdev->pdev->dev,
4292                         "Configure rss input fail, status = %d\n", ret);
4293         return ret;
4294 }
4295
4296 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4297                          u8 *key, u8 *hfunc)
4298 {
4299         struct hclge_vport *vport = hclge_get_vport(handle);
4300         int i;
4301
4302         /* Get hash algorithm */
4303         if (hfunc) {
4304                 switch (vport->rss_algo) {
4305                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4306                         *hfunc = ETH_RSS_HASH_TOP;
4307                         break;
4308                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4309                         *hfunc = ETH_RSS_HASH_XOR;
4310                         break;
4311                 default:
4312                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4313                         break;
4314                 }
4315         }
4316
4317         /* Get the RSS Key required by the user */
4318         if (key)
4319                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4320
4321         /* Get indirect table */
4322         if (indir)
4323                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4324                         indir[i] = vport->rss_indirection_tbl[i];
4325
4326         return 0;
4327 }
4328
4329 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4330                          const  u8 *key, const  u8 hfunc)
4331 {
4332         struct hclge_vport *vport = hclge_get_vport(handle);
4333         struct hclge_dev *hdev = vport->back;
4334         u8 hash_algo;
4335         int ret, i;
4336
4337         /* Set the RSS Hash Key if specified by the user */
4338         if (key) {
4339                 switch (hfunc) {
4340                 case ETH_RSS_HASH_TOP:
4341                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4342                         break;
4343                 case ETH_RSS_HASH_XOR:
4344                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4345                         break;
4346                 case ETH_RSS_HASH_NO_CHANGE:
4347                         hash_algo = vport->rss_algo;
4348                         break;
4349                 default:
4350                         return -EINVAL;
4351                 }
4352
4353                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4354                 if (ret)
4355                         return ret;
4356
4357                 /* Update the shadow RSS key with the user specified key */
4358                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4359                 vport->rss_algo = hash_algo;
4360         }
4361
4362         /* Update the shadow RSS table with user specified qids */
4363         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4364                 vport->rss_indirection_tbl[i] = indir[i];
4365
4366         /* Update the hardware */
4367         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4368 }
4369
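/* Translate the ethtool RXH_* flags in nfc->data into the driver's RSS
 * tuple bit encoding. HCLGE_V_TAG_BIT is additionally set for SCTP
 * flows.
 */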
4370 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4371 {
4372         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4373
4374         if (nfc->data & RXH_L4_B_2_3)
4375                 hash_sets |= HCLGE_D_PORT_BIT;
4376         else
4377                 hash_sets &= ~HCLGE_D_PORT_BIT;
4378
4379         if (nfc->data & RXH_IP_SRC)
4380                 hash_sets |= HCLGE_S_IP_BIT;
4381         else
4382                 hash_sets &= ~HCLGE_S_IP_BIT;
4383
4384         if (nfc->data & RXH_IP_DST)
4385                 hash_sets |= HCLGE_D_IP_BIT;
4386         else
4387                 hash_sets &= ~HCLGE_D_IP_BIT;
4388
4389         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4390                 hash_sets |= HCLGE_V_TAG_BIT;
4391
4392         return hash_sets;
4393 }
4394
4395 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4396                                struct ethtool_rxnfc *nfc)
4397 {
4398         struct hclge_vport *vport = hclge_get_vport(handle);
4399         struct hclge_dev *hdev = vport->back;
4400         struct hclge_rss_input_tuple_cmd *req;
4401         struct hclge_desc desc;
4402         u8 tuple_sets;
4403         int ret;
4404
4405         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4406                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4407                 return -EINVAL;
4408
4409         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4410         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4411
4412         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4413         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4414         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4415         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4416         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4417         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4418         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4419         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4420
4421         tuple_sets = hclge_get_rss_hash_bits(nfc);
4422         switch (nfc->flow_type) {
4423         case TCP_V4_FLOW:
4424                 req->ipv4_tcp_en = tuple_sets;
4425                 break;
4426         case TCP_V6_FLOW:
4427                 req->ipv6_tcp_en = tuple_sets;
4428                 break;
4429         case UDP_V4_FLOW:
4430                 req->ipv4_udp_en = tuple_sets;
4431                 break;
4432         case UDP_V6_FLOW:
4433                 req->ipv6_udp_en = tuple_sets;
4434                 break;
4435         case SCTP_V4_FLOW:
4436                 req->ipv4_sctp_en = tuple_sets;
4437                 break;
4438         case SCTP_V6_FLOW:
4439                 if ((nfc->data & RXH_L4_B_0_1) ||
4440                     (nfc->data & RXH_L4_B_2_3))
4441                         return -EINVAL;
4442
4443                 req->ipv6_sctp_en = tuple_sets;
4444                 break;
4445         case IPV4_FLOW:
4446                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4447                 break;
4448         case IPV6_FLOW:
4449                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4450                 break;
4451         default:
4452                 return -EINVAL;
4453         }
4454
4455         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4456         if (ret) {
4457                 dev_err(&hdev->pdev->dev,
4458                         "Set rss tuple fail, status = %d\n", ret);
4459                 return ret;
4460         }
4461
4462         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4463         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4464         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4465         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4466         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4467         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4468         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4469         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4470         hclge_get_rss_type(vport);
4471         return 0;
4472 }
4473
4474 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4475                                struct ethtool_rxnfc *nfc)
4476 {
4477         struct hclge_vport *vport = hclge_get_vport(handle);
4478         u8 tuple_sets;
4479
4480         nfc->data = 0;
4481
4482         switch (nfc->flow_type) {
4483         case TCP_V4_FLOW:
4484                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4485                 break;
4486         case UDP_V4_FLOW:
4487                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4488                 break;
4489         case TCP_V6_FLOW:
4490                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4491                 break;
4492         case UDP_V6_FLOW:
4493                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4494                 break;
4495         case SCTP_V4_FLOW:
4496                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4497                 break;
4498         case SCTP_V6_FLOW:
4499                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4500                 break;
4501         case IPV4_FLOW:
4502         case IPV6_FLOW:
4503                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4504                 break;
4505         default:
4506                 return -EINVAL;
4507         }
4508
4509         if (!tuple_sets)
4510                 return 0;
4511
4512         if (tuple_sets & HCLGE_D_PORT_BIT)
4513                 nfc->data |= RXH_L4_B_2_3;
4514         if (tuple_sets & HCLGE_S_PORT_BIT)
4515                 nfc->data |= RXH_L4_B_0_1;
4516         if (tuple_sets & HCLGE_D_IP_BIT)
4517                 nfc->data |= RXH_IP_DST;
4518         if (tuple_sets & HCLGE_S_IP_BIT)
4519                 nfc->data |= RXH_IP_SRC;
4520
4521         return 0;
4522 }
4523
4524 static int hclge_get_tc_size(struct hnae3_handle *handle)
4525 {
4526         struct hclge_vport *vport = hclge_get_vport(handle);
4527         struct hclge_dev *hdev = vport->back;
4528
4529         return hdev->rss_size_max;
4530 }
4531
4532 int hclge_rss_init_hw(struct hclge_dev *hdev)
4533 {
4534         struct hclge_vport *vport = hdev->vport;
4535         u8 *rss_indir = vport[0].rss_indirection_tbl;
4536         u16 rss_size = vport[0].alloc_rss_size;
4537         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4538         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4539         u8 *key = vport[0].rss_hash_key;
4540         u8 hfunc = vport[0].rss_algo;
4541         u16 tc_valid[HCLGE_MAX_TC_NUM];
4542         u16 roundup_size;
4543         unsigned int i;
4544         int ret;
4545
4546         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4547         if (ret)
4548                 return ret;
4549
4550         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4551         if (ret)
4552                 return ret;
4553
4554         ret = hclge_set_rss_input_tuple(hdev);
4555         if (ret)
4556                 return ret;
4557
4558         /* Each TC has the same queue size, and the tc_size set to hardware is
4559          * the log2 of rss_size rounded up to a power of two; the actual queue
4560          * size is limited by the indirection table.
4561          */
4562         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4563                 dev_err(&hdev->pdev->dev,
4564                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4565                         rss_size);
4566                 return -EINVAL;
4567         }
4568
4569         roundup_size = roundup_pow_of_two(rss_size);
4570         roundup_size = ilog2(roundup_size);
4571
4572         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4573                 tc_valid[i] = 0;
4574
4575                 if (!(hdev->hw_tc_map & BIT(i)))
4576                         continue;
4577
4578                 tc_valid[i] = 1;
4579                 tc_size[i] = roundup_size;
4580                 tc_offset[i] = rss_size * i;
4581         }
4582
4583         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4584 }
4585
4586 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4587 {
4588         struct hclge_vport *vport = hdev->vport;
4589         int i, j;
4590
4591         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4592                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4593                         vport[j].rss_indirection_tbl[i] =
4594                                 i % vport[j].alloc_rss_size;
4595         }
4596 }
4597
4598 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4599 {
4600         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4601         struct hclge_vport *vport = hdev->vport;
4602
4603         if (hdev->pdev->revision >= 0x21)
4604                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4605
4606         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4607                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4608                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4609                 vport[i].rss_tuple_sets.ipv4_udp_en =
4610                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4611                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4612                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4613                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4614                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4615                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4616                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4617                 vport[i].rss_tuple_sets.ipv6_udp_en =
4618                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4619                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4620                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4621                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4622                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4623
4624                 vport[i].rss_algo = rss_algo;
4625
4626                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4627                        HCLGE_RSS_KEY_SIZE);
4628         }
4629
4630         hclge_rss_indir_init_cfg(hdev);
4631 }
4632
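/* Map (en == true) or unmap (en == false) the rings in ring_chain to
 * the given vector. Each command descriptor carries at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so long chains are split
 * across multiple commands.
 */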
4633 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4634                                 int vector_id, bool en,
4635                                 struct hnae3_ring_chain_node *ring_chain)
4636 {
4637         struct hclge_dev *hdev = vport->back;
4638         struct hnae3_ring_chain_node *node;
4639         struct hclge_desc desc;
4640         struct hclge_ctrl_vector_chain_cmd *req =
4641                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4642         enum hclge_cmd_status status;
4643         enum hclge_opcode_type op;
4644         u16 tqp_type_and_id;
4645         int i;
4646
4647         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4648         hclge_cmd_setup_basic_desc(&desc, op, false);
4649         req->int_vector_id = vector_id;
4650
4651         i = 0;
4652         for (node = ring_chain; node; node = node->next) {
4653                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4654                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4655                                 HCLGE_INT_TYPE_S,
4656                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4657                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4658                                 HCLGE_TQP_ID_S, node->tqp_index);
4659                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4660                                 HCLGE_INT_GL_IDX_S,
4661                                 hnae3_get_field(node->int_gl_idx,
4662                                                 HNAE3_RING_GL_IDX_M,
4663                                                 HNAE3_RING_GL_IDX_S));
4664                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4665                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4666                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4667                         req->vfid = vport->vport_id;
4668
4669                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4670                         if (status) {
4671                                 dev_err(&hdev->pdev->dev,
4672                                         "Map TQP fail, status is %d.\n",
4673                                         status);
4674                                 return -EIO;
4675                         }
4676                         i = 0;
4677
4678                         hclge_cmd_setup_basic_desc(&desc,
4679                                                    op,
4680                                                    false);
4681                         req->int_vector_id = vector_id;
4682                 }
4683         }
4684
4685         if (i > 0) {
4686                 req->int_cause_num = i;
4687                 req->vfid = vport->vport_id;
4688                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4689                 if (status) {
4690                         dev_err(&hdev->pdev->dev,
4691                                 "Map TQP fail, status is %d.\n", status);
4692                         return -EIO;
4693                 }
4694         }
4695
4696         return 0;
4697 }
4698
4699 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4700                                     struct hnae3_ring_chain_node *ring_chain)
4701 {
4702         struct hclge_vport *vport = hclge_get_vport(handle);
4703         struct hclge_dev *hdev = vport->back;
4704         int vector_id;
4705
4706         vector_id = hclge_get_vector_index(hdev, vector);
4707         if (vector_id < 0) {
4708                 dev_err(&hdev->pdev->dev,
4709                         "failed to get vector index. vector=%d\n", vector);
4710                 return vector_id;
4711         }
4712
4713         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4714 }
4715
4716 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4717                                        struct hnae3_ring_chain_node *ring_chain)
4718 {
4719         struct hclge_vport *vport = hclge_get_vport(handle);
4720         struct hclge_dev *hdev = vport->back;
4721         int vector_id, ret;
4722
4723         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4724                 return 0;
4725
4726         vector_id = hclge_get_vector_index(hdev, vector);
4727         if (vector_id < 0) {
4728                 dev_err(&handle->pdev->dev,
4729                         "failed to get vector index, ret = %d\n", vector_id);
4730                 return vector_id;
4731         }
4732
4733         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4734         if (ret)
4735                 dev_err(&handle->pdev->dev,
4736                         "Unmap ring from vector fail. vector_id = %d, ret = %d\n",
4737                         vector_id, ret);
4738
4739         return ret;
4740 }
4741
4742 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4743                                       struct hclge_promisc_param *param)
4744 {
4745         struct hclge_promisc_cfg_cmd *req;
4746         struct hclge_desc desc;
4747         int ret;
4748
4749         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4750
4751         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4752         req->vf_id = param->vf_id;
4753
4754         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4755          * pdev revision(0x20); newer revisions support them. Setting these
4756          * two fields does not cause an error when the driver sends the
4757          * command to firmware on revision(0x20).
4758          */
4759         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4760                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4761
4762         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4763         if (ret)
4764                 dev_err(&hdev->pdev->dev,
4765                         "Set promisc mode fail, status is %d.\n", ret);
4766
4767         return ret;
4768 }
4769
4770 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4771                                      bool en_uc, bool en_mc, bool en_bc,
4772                                      int vport_id)
4773 {
4774         if (!param)
4775                 return;
4776
4777         memset(param, 0, sizeof(struct hclge_promisc_param));
4778         if (en_uc)
4779                 param->enable = HCLGE_PROMISC_EN_UC;
4780         if (en_mc)
4781                 param->enable |= HCLGE_PROMISC_EN_MC;
4782         if (en_bc)
4783                 param->enable |= HCLGE_PROMISC_EN_BC;
4784         param->vf_id = vport_id;
4785 }
4786
4787 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4788                                  bool en_mc_pmc, bool en_bc_pmc)
4789 {
4790         struct hclge_dev *hdev = vport->back;
4791         struct hclge_promisc_param param;
4792
4793         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4794                                  vport->vport_id);
4795         return hclge_cmd_set_promisc_mode(hdev, &param);
4796 }
4797
4798 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4799                                   bool en_mc_pmc)
4800 {
4801         struct hclge_vport *vport = hclge_get_vport(handle);
4802         bool en_bc_pmc = true;
4803
4804         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4805          * is always bypassed. So broadcast promisc should be disabled until
4806          * the user enables promisc mode.
4807          */
4808         if (handle->pdev->revision == 0x20)
4809                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4810
4811         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4812                                             en_bc_pmc);
4813 }
4814
4815 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4816 {
4817         struct hclge_get_fd_mode_cmd *req;
4818         struct hclge_desc desc;
4819         int ret;
4820
4821         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4822
4823         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4824
4825         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4826         if (ret) {
4827                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4828                 return ret;
4829         }
4830
4831         *fd_mode = req->mode;
4832
4833         return ret;
4834 }
4835
4836 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4837                                    u32 *stage1_entry_num,
4838                                    u32 *stage2_entry_num,
4839                                    u16 *stage1_counter_num,
4840                                    u16 *stage2_counter_num)
4841 {
4842         struct hclge_get_fd_allocation_cmd *req;
4843         struct hclge_desc desc;
4844         int ret;
4845
4846         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4847
4848         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4849
4850         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4851         if (ret) {
4852                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4853                         ret);
4854                 return ret;
4855         }
4856
4857         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4858         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4859         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4860         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4861
4862         return ret;
4863 }
4864
4865 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4866 {
4867         struct hclge_set_fd_key_config_cmd *req;
4868         struct hclge_fd_key_cfg *stage;
4869         struct hclge_desc desc;
4870         int ret;
4871
4872         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4873
4874         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4875         stage = &hdev->fd_cfg.key_cfg[stage_num];
4876         req->stage = stage_num;
4877         req->key_select = stage->key_sel;
4878         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4879         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4880         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4881         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4882         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4883         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4884
4885         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4886         if (ret)
4887                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4888
4889         return ret;
4890 }
4891
4892 static int hclge_init_fd_config(struct hclge_dev *hdev)
4893 {
4894 #define LOW_2_WORDS             0x03
4895         struct hclge_fd_key_cfg *key_cfg;
4896         int ret;
4897
4898         if (!hnae3_dev_fd_supported(hdev))
4899                 return 0;
4900
4901         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4902         if (ret)
4903                 return ret;
4904
4905         switch (hdev->fd_cfg.fd_mode) {
4906         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4907                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4908                 break;
4909         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4910                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4911                 break;
4912         default:
4913                 dev_err(&hdev->pdev->dev,
4914                         "Unsupported flow director mode %u\n",
4915                         hdev->fd_cfg.fd_mode);
4916                 return -EOPNOTSUPP;
4917         }
4918
4919         hdev->fd_cfg.proto_support =
4920                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4921                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4922         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4923         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4924         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4925         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4926         key_cfg->outer_sipv6_word_en = 0;
4927         key_cfg->outer_dipv6_word_en = 0;
4928
4929         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4930                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4931                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4932                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4933
4934         /* When using the max 400-bit key, tuples for ether type are also supported */
4935         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4936                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4937                 key_cfg->tuple_active |=
4938                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4939         }
4940
4941         /* roce_type is used to filter roce frames
4942          * dst_vport is used to specify the rule
4943          */
4944         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4945
4946         ret = hclge_get_fd_allocation(hdev,
4947                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4948                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4949                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4950                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4951         if (ret)
4952                 return ret;
4953
4954         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4955 }
4956
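/* Program one flow director TCAM entry at location loc. sel_x selects
 * whether the x or y half of the key is written; the key data spans
 * three chained command descriptors.
 */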
4957 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4958                                 int loc, u8 *key, bool is_add)
4959 {
4960         struct hclge_fd_tcam_config_1_cmd *req1;
4961         struct hclge_fd_tcam_config_2_cmd *req2;
4962         struct hclge_fd_tcam_config_3_cmd *req3;
4963         struct hclge_desc desc[3];
4964         int ret;
4965
4966         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4967         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4968         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4969         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4970         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4971
4972         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4973         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4974         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4975
4976         req1->stage = stage;
4977         req1->xy_sel = sel_x ? 1 : 0;
4978         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4979         req1->index = cpu_to_le32(loc);
4980         req1->entry_vld = sel_x ? is_add : 0;
4981
4982         if (key) {
4983                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4984                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4985                        sizeof(req2->tcam_data));
4986                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4987                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4988         }
4989
4990         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4991         if (ret)
4992                 dev_err(&hdev->pdev->dev,
4993                         "config tcam key fail, ret=%d\n",
4994                         ret);
4995
4996         return ret;
4997 }
4998
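/* Configure the flow director action data for the rule at location loc:
 * drop or forward to a direct queue, optional counter usage and rule id
 * write-back, all packed into a single 64-bit ad_data word.
 */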
4999 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5000                               struct hclge_fd_ad_data *action)
5001 {
5002         struct hclge_fd_ad_config_cmd *req;
5003         struct hclge_desc desc;
5004         u64 ad_data = 0;
5005         int ret;
5006
5007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5008
5009         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5010         req->index = cpu_to_le32(loc);
5011         req->stage = stage;
5012
5013         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5014                       action->write_rule_id_to_bd);
5015         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5016                         action->rule_id);
5017         ad_data <<= 32;
5018         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5019         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5020                       action->forward_to_direct_queue);
5021         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5022                         action->queue_id);
5023         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5024         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5025                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5026         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5027         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5028                         action->counter_id);
5029
5030         req->ad_data = cpu_to_le64(ad_data);
5031         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5032         if (ret)
5033                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5034
5035         return ret;
5036 }
5037
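/* Convert one rule tuple into the TCAM x/y key representation using
 * calc_x()/calc_y(). Returns true if the tuple occupies space in the
 * key layout (the caller then advances the key cursor), false if the
 * tuple is not part of the configured key at all.
 */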
5038 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5039                                    struct hclge_fd_rule *rule)
5040 {
5041         u16 tmp_x_s, tmp_y_s;
5042         u32 tmp_x_l, tmp_y_l;
5043         int i;
5044
5045         if (rule->unused_tuple & tuple_bit)
5046                 return true;
5047
5048         switch (tuple_bit) {
5049         case 0:
5050                 return false;
5051         case BIT(INNER_DST_MAC):
5052                 for (i = 0; i < ETH_ALEN; i++) {
5053                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5054                                rule->tuples_mask.dst_mac[i]);
5055                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5056                                rule->tuples_mask.dst_mac[i]);
5057                 }
5058
5059                 return true;
5060         case BIT(INNER_SRC_MAC):
5061                 for (i = 0; i < ETH_ALEN; i++) {
5062                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5063                                rule->tuples_mask.src_mac[i]);
5064                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5065                                rule->tuples_mask.src_mac[i]);
5066                 }
5067
5068                 return true;
5069         case BIT(INNER_VLAN_TAG_FST):
5070                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5071                        rule->tuples_mask.vlan_tag1);
5072                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5073                        rule->tuples_mask.vlan_tag1);
5074                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5075                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5076
5077                 return true;
5078         case BIT(INNER_ETH_TYPE):
5079                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5080                        rule->tuples_mask.ether_proto);
5081                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5082                        rule->tuples_mask.ether_proto);
5083                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5084                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5085
5086                 return true;
5087         case BIT(INNER_IP_TOS):
5088                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5089                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5090
5091                 return true;
5092         case BIT(INNER_IP_PROTO):
5093                 calc_x(*key_x, rule->tuples.ip_proto,
5094                        rule->tuples_mask.ip_proto);
5095                 calc_y(*key_y, rule->tuples.ip_proto,
5096                        rule->tuples_mask.ip_proto);
5097
5098                 return true;
5099         case BIT(INNER_SRC_IP):
5100                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5101                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5102                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5103                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5104                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5105                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5106
5107                 return true;
5108         case BIT(INNER_DST_IP):
5109                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5110                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5111                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5112                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5113                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5114                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5115
5116                 return true;
5117         case BIT(INNER_SRC_PORT):
5118                 calc_x(tmp_x_s, rule->tuples.src_port,
5119                        rule->tuples_mask.src_port);
5120                 calc_y(tmp_y_s, rule->tuples.src_port,
5121                        rule->tuples_mask.src_port);
5122                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5123                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5124
5125                 return true;
5126         case BIT(INNER_DST_PORT):
5127                 calc_x(tmp_x_s, rule->tuples.dst_port,
5128                        rule->tuples_mask.dst_port);
5129                 calc_y(tmp_y_s, rule->tuples.dst_port,
5130                        rule->tuples_mask.dst_port);
5131                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5132                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5133
5134                 return true;
5135         default:
5136                 return false;
5137         }
5138 }
5139
5140 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5141                                  u8 vf_id, u8 network_port_id)
5142 {
5143         u32 port_number = 0;
5144
5145         if (port_type == HOST_PORT) {
5146                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5147                                 pf_id);
5148                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5149                                 vf_id);
5150                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5151         } else {
5152                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5153                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5154                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5155         }
5156
5157         return port_number;
5158 }
5159
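/* Pack the active meta data fields (NIC/RoCE packet type and destination
 * vport number) into a 32-bit word, convert it to the TCAM x/y form and
 * left-align it so that the fields occupy the most significant bits.
 */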
5160 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5161                                        __le32 *key_x, __le32 *key_y,
5162                                        struct hclge_fd_rule *rule)
5163 {
5164         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5165         u8 cur_pos = 0, tuple_size, shift_bits;
5166         unsigned int i;
5167
5168         for (i = 0; i < MAX_META_DATA; i++) {
5169                 tuple_size = meta_data_key_info[i].key_length;
5170                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5171
5172                 switch (tuple_bit) {
5173                 case BIT(ROCE_TYPE):
5174                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5175                         cur_pos += tuple_size;
5176                         break;
5177                 case BIT(DST_VPORT):
5178                         port_number = hclge_get_port_number(HOST_PORT, 0,
5179                                                             rule->vf_id, 0);
5180                         hnae3_set_field(meta_data,
5181                                         GENMASK(cur_pos + tuple_size, cur_pos),
5182                                         cur_pos, port_number);
5183                         cur_pos += tuple_size;
5184                         break;
5185                 default:
5186                         break;
5187                 }
5188         }
5189
5190         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5191         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5192         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5193
5194         *key_x = cpu_to_le32(tmp_x << shift_bits);
5195         *key_y = cpu_to_le32(tmp_y << shift_bits);
5196 }
5197
5198 /* A complete key consists of a meta data key and a tuple key.
5199  * The meta data key is stored in the MSB region, the tuple key is stored
5200  * in the LSB region, and unused bits are filled with 0.
5201  */
5202 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5203                             struct hclge_fd_rule *rule)
5204 {
5205         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5206         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5207         u8 *cur_key_x, *cur_key_y;
5208         unsigned int i;
5209         int ret, tuple_size;
5210         u8 meta_data_region;
5211
5212         memset(key_x, 0, sizeof(key_x));
5213         memset(key_y, 0, sizeof(key_y));
5214         cur_key_x = key_x;
5215         cur_key_y = key_y;
5216
5217         for (i = 0; i < MAX_TUPLE; i++) {
5218                 bool tuple_valid;
5219                 u32 check_tuple;
5220
5221                 tuple_size = tuple_key_info[i].key_length / 8;
5222                 check_tuple = key_cfg->tuple_active & BIT(i);
5223
5224                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5225                                                      cur_key_y, rule);
5226                 if (tuple_valid) {
5227                         cur_key_x += tuple_size;
5228                         cur_key_y += tuple_size;
5229                 }
5230         }
5231
5232         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5233                         MAX_META_DATA_LENGTH / 8;
5234
5235         hclge_fd_convert_meta_data(key_cfg,
5236                                    (__le32 *)(key_x + meta_data_region),
5237                                    (__le32 *)(key_y + meta_data_region),
5238                                    rule);
5239
5240         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5241                                    true);
5242         if (ret) {
5243                 dev_err(&hdev->pdev->dev,
5244                         "fd key_y config fail, loc=%u, ret=%d\n",
5245                         rule->location, ret);
5246                 return ret;
5247         }
5248
5249         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5250                                    true);
5251         if (ret)
5252                 dev_err(&hdev->pdev->dev,
5253                         "fd key_x config fail, loc=%u, ret=%d\n",
5254                         rule->location, ret);
5255         return ret;
5256 }
5257
5258 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5259                                struct hclge_fd_rule *rule)
5260 {
5261         struct hclge_fd_ad_data ad_data;
5262
5263         ad_data.ad_id = rule->location;
5264
5265         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5266                 ad_data.drop_packet = true;
5267                 ad_data.forward_to_direct_queue = false;
5268                 ad_data.queue_id = 0;
5269         } else {
5270                 ad_data.drop_packet = false;
5271                 ad_data.forward_to_direct_queue = true;
5272                 ad_data.queue_id = rule->queue_id;
5273         }
5274
5275         ad_data.use_counter = false;
5276         ad_data.counter_id = 0;
5277
5278         ad_data.use_next_stage = false;
5279         ad_data.next_input_key = 0;
5280
5281         ad_data.write_rule_id_to_bd = true;
5282         ad_data.rule_id = rule->location;
5283
5284         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5285 }
5286
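/* Validate an ethtool flow spec for the flow director: check the rule
 * location, flow type and extension fields, and build the bitmap of
 * tuples the rule does not use in unused.
 */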
5287 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5288                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5289 {
5290         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5291         struct ethtool_usrip4_spec *usr_ip4_spec;
5292         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5293         struct ethtool_usrip6_spec *usr_ip6_spec;
5294         struct ethhdr *ether_spec;
5295
5296         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5297                 return -EINVAL;
5298
5299         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5300                 return -EOPNOTSUPP;
5301
5302         if ((fs->flow_type & FLOW_EXT) &&
5303             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5304                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5305                 return -EOPNOTSUPP;
5306         }
5307
5308         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5309         case SCTP_V4_FLOW:
5310         case TCP_V4_FLOW:
5311         case UDP_V4_FLOW:
5312                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5313                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5314
5315                 if (!tcp_ip4_spec->ip4src)
5316                         *unused |= BIT(INNER_SRC_IP);
5317
5318                 if (!tcp_ip4_spec->ip4dst)
5319                         *unused |= BIT(INNER_DST_IP);
5320
5321                 if (!tcp_ip4_spec->psrc)
5322                         *unused |= BIT(INNER_SRC_PORT);
5323
5324                 if (!tcp_ip4_spec->pdst)
5325                         *unused |= BIT(INNER_DST_PORT);
5326
5327                 if (!tcp_ip4_spec->tos)
5328                         *unused |= BIT(INNER_IP_TOS);
5329
5330                 break;
5331         case IP_USER_FLOW:
5332                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5333                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5334                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5335
5336                 if (!usr_ip4_spec->ip4src)
5337                         *unused |= BIT(INNER_SRC_IP);
5338
5339                 if (!usr_ip4_spec->ip4dst)
5340                         *unused |= BIT(INNER_DST_IP);
5341
5342                 if (!usr_ip4_spec->tos)
5343                         *unused |= BIT(INNER_IP_TOS);
5344
5345                 if (!usr_ip4_spec->proto)
5346                         *unused |= BIT(INNER_IP_PROTO);
5347
5348                 if (usr_ip4_spec->l4_4_bytes)
5349                         return -EOPNOTSUPP;
5350
5351                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5352                         return -EOPNOTSUPP;
5353
5354                 break;
5355         case SCTP_V6_FLOW:
5356         case TCP_V6_FLOW:
5357         case UDP_V6_FLOW:
5358                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5359                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5360                         BIT(INNER_IP_TOS);
5361
5362                 /* check whether the src/dst ip address is used */
5363                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5364                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5365                         *unused |= BIT(INNER_SRC_IP);
5366
5367                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5368                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5369                         *unused |= BIT(INNER_DST_IP);
5370
5371                 if (!tcp_ip6_spec->psrc)
5372                         *unused |= BIT(INNER_SRC_PORT);
5373
5374                 if (!tcp_ip6_spec->pdst)
5375                         *unused |= BIT(INNER_DST_PORT);
5376
5377                 if (tcp_ip6_spec->tclass)
5378                         return -EOPNOTSUPP;
5379
5380                 break;
5381         case IPV6_USER_FLOW:
5382                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5383                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5384                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5385                         BIT(INNER_DST_PORT);
5386
5387                 /* check whether the src/dst ip address is used */
5388                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5389                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5390                         *unused |= BIT(INNER_SRC_IP);
5391
5392                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5393                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5394                         *unused |= BIT(INNER_DST_IP);
5395
5396                 if (!usr_ip6_spec->l4_proto)
5397                         *unused |= BIT(INNER_IP_PROTO);
5398
5399                 if (usr_ip6_spec->tclass)
5400                         return -EOPNOTSUPP;
5401
5402                 if (usr_ip6_spec->l4_4_bytes)
5403                         return -EOPNOTSUPP;
5404
5405                 break;
5406         case ETHER_FLOW:
5407                 ether_spec = &fs->h_u.ether_spec;
5408                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5409                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5410                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5411
5412                 if (is_zero_ether_addr(ether_spec->h_source))
5413                         *unused |= BIT(INNER_SRC_MAC);
5414
5415                 if (is_zero_ether_addr(ether_spec->h_dest))
5416                         *unused |= BIT(INNER_DST_MAC);
5417
5418                 if (!ether_spec->h_proto)
5419                         *unused |= BIT(INNER_ETH_TYPE);
5420
5421                 break;
5422         default:
5423                 return -EOPNOTSUPP;
5424         }
5425
5426         if ((fs->flow_type & FLOW_EXT)) {
5427                 if (fs->h_ext.vlan_etype)
5428                         return -EOPNOTSUPP;
5429                 if (!fs->h_ext.vlan_tci)
5430                         *unused |= BIT(INNER_VLAN_TAG_FST);
5431
5432                 if (fs->m_ext.vlan_tci) {
5433                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5434                                 return -EINVAL;
5435                 }
5436         } else {
5437                 *unused |= BIT(INNER_VLAN_TAG_FST);
5438         }
5439
5440         if (fs->flow_type & FLOW_MAC_EXT) {
5441                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5442                         return -EOPNOTSUPP;
5443
5444                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5445                         *unused |= BIT(INNER_DST_MAC);
5446                 else
5447                         *unused &= ~(BIT(INNER_DST_MAC));
5448         }
5449
5450         return 0;
5451 }
5452
5453 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5454 {
5455         struct hclge_fd_rule *rule = NULL;
5456         struct hlist_node *node2;
5457
5458         spin_lock_bh(&hdev->fd_rule_lock);
5459         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5460                 if (rule->location >= location)
5461                         break;
5462         }
5463
5464         spin_unlock_bh(&hdev->fd_rule_lock);
5465
5466         return rule && rule->location == location;
5467 }
5468
5469 /* The caller must hold hdev->fd_rule_lock */
5470 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5471                                      struct hclge_fd_rule *new_rule,
5472                                      u16 location,
5473                                      bool is_add)
5474 {
5475         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5476         struct hlist_node *node2;
5477
5478         if (is_add && !new_rule)
5479                 return -EINVAL;
5480
5481         hlist_for_each_entry_safe(rule, node2,
5482                                   &hdev->fd_rule_list, rule_node) {
5483                 if (rule->location >= location)
5484                         break;
5485                 parent = rule;
5486         }
5487
5488         if (rule && rule->location == location) {
5489                 hlist_del(&rule->rule_node);
5490                 kfree(rule);
5491                 hdev->hclge_fd_rule_num--;
5492
5493                 if (!is_add) {
5494                         if (!hdev->hclge_fd_rule_num)
5495                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5496                         clear_bit(location, hdev->fd_bmap);
5497
5498                         return 0;
5499                 }
5500         } else if (!is_add) {
5501                 dev_err(&hdev->pdev->dev,
5502                         "delete fail, rule %u does not exist\n",
5503                         location);
5504                 return -EINVAL;
5505         }
5506
5507         INIT_HLIST_NODE(&new_rule->rule_node);
5508
5509         if (parent)
5510                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5511         else
5512                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5513
5514         set_bit(location, hdev->fd_bmap);
5515         hdev->hclge_fd_rule_num++;
5516         hdev->fd_active_type = new_rule->rule_type;
5517
5518         return 0;
5519 }
5520
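/* Fill the rule's tuple values and masks from the ethtool flow spec,
 * converting multi-byte fields from big endian to host byte order.
 */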
5521 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5522                               struct ethtool_rx_flow_spec *fs,
5523                               struct hclge_fd_rule *rule)
5524 {
5525         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5526
5527         switch (flow_type) {
5528         case SCTP_V4_FLOW:
5529         case TCP_V4_FLOW:
5530         case UDP_V4_FLOW:
5531                 rule->tuples.src_ip[IPV4_INDEX] =
5532                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5533                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5534                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5535
5536                 rule->tuples.dst_ip[IPV4_INDEX] =
5537                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5538                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5539                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5540
5541                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5542                 rule->tuples_mask.src_port =
5543                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5544
5545                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5546                 rule->tuples_mask.dst_port =
5547                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5548
5549                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5550                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5551
5552                 rule->tuples.ether_proto = ETH_P_IP;
5553                 rule->tuples_mask.ether_proto = 0xFFFF;
5554
5555                 break;
5556         case IP_USER_FLOW:
5557                 rule->tuples.src_ip[IPV4_INDEX] =
5558                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5559                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5560                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5561
5562                 rule->tuples.dst_ip[IPV4_INDEX] =
5563                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5564                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5565                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5566
5567                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5568                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5569
5570                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5571                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5572
5573                 rule->tuples.ether_proto = ETH_P_IP;
5574                 rule->tuples_mask.ether_proto = 0xFFFF;
5575
5576                 break;
5577         case SCTP_V6_FLOW:
5578         case TCP_V6_FLOW:
5579         case UDP_V6_FLOW:
5580                 be32_to_cpu_array(rule->tuples.src_ip,
5581                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5582                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5583                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5584
5585                 be32_to_cpu_array(rule->tuples.dst_ip,
5586                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5587                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5588                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5589
5590                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5591                 rule->tuples_mask.src_port =
5592                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5593
5594                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5595                 rule->tuples_mask.dst_port =
5596                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5597
5598                 rule->tuples.ether_proto = ETH_P_IPV6;
5599                 rule->tuples_mask.ether_proto = 0xFFFF;
5600
5601                 break;
5602         case IPV6_USER_FLOW:
5603                 be32_to_cpu_array(rule->tuples.src_ip,
5604                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5605                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5606                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5607
5608                 be32_to_cpu_array(rule->tuples.dst_ip,
5609                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5610                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5611                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5612
5613                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5614                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5615
5616                 rule->tuples.ether_proto = ETH_P_IPV6;
5617                 rule->tuples_mask.ether_proto = 0xFFFF;
5618
5619                 break;
5620         case ETHER_FLOW:
5621                 ether_addr_copy(rule->tuples.src_mac,
5622                                 fs->h_u.ether_spec.h_source);
5623                 ether_addr_copy(rule->tuples_mask.src_mac,
5624                                 fs->m_u.ether_spec.h_source);
5625
5626                 ether_addr_copy(rule->tuples.dst_mac,
5627                                 fs->h_u.ether_spec.h_dest);
5628                 ether_addr_copy(rule->tuples_mask.dst_mac,
5629                                 fs->m_u.ether_spec.h_dest);
5630
5631                 rule->tuples.ether_proto =
5632                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5633                 rule->tuples_mask.ether_proto =
5634                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5635
5636                 break;
5637         default:
5638                 return -EOPNOTSUPP;
5639         }
5640
5641         switch (flow_type) {
5642         case SCTP_V4_FLOW:
5643         case SCTP_V6_FLOW:
5644                 rule->tuples.ip_proto = IPPROTO_SCTP;
5645                 rule->tuples_mask.ip_proto = 0xFF;
5646                 break;
5647         case TCP_V4_FLOW:
5648         case TCP_V6_FLOW:
5649                 rule->tuples.ip_proto = IPPROTO_TCP;
5650                 rule->tuples_mask.ip_proto = 0xFF;
5651                 break;
5652         case UDP_V4_FLOW:
5653         case UDP_V6_FLOW:
5654                 rule->tuples.ip_proto = IPPROTO_UDP;
5655                 rule->tuples_mask.ip_proto = 0xFF;
5656                 break;
5657         default:
5658                 break;
5659         }
5660
5661         if ((fs->flow_type & FLOW_EXT)) {
5662                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5663                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5664         }
5665
5666         if (fs->flow_type & FLOW_MAC_EXT) {
5667                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5668                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5669         }
5670
5671         return 0;
5672 }
5673
5674 /* make sure this function is called with fd_rule_lock held */
5675 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5676                                 struct hclge_fd_rule *rule)
5677 {
5678         int ret;
5679
5680         if (!rule) {
5681                 dev_err(&hdev->pdev->dev,
5682                         "The flow director rule is NULL\n");
5683                 return -EINVAL;
5684         }
5685
5686         /* it will never fail here, so there is no need to check the return value */
5687         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5688
5689         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5690         if (ret)
5691                 goto clear_rule;
5692
5693         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5694         if (ret)
5695                 goto clear_rule;
5696
5697         return 0;
5698
5699 clear_rule:
5700         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5701         return ret;
5702 }
5703
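/* Add a flow director rule configured through ethtool's ntuple interface:
 * check the spec, resolve the ring cookie into a drop action or a
 * vport/queue pair, clear any aRFS rules to avoid conflicts, then program
 * the rule under fd_rule_lock.
 */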
5704 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5705                               struct ethtool_rxnfc *cmd)
5706 {
5707         struct hclge_vport *vport = hclge_get_vport(handle);
5708         struct hclge_dev *hdev = vport->back;
5709         u16 dst_vport_id = 0, q_index = 0;
5710         struct ethtool_rx_flow_spec *fs;
5711         struct hclge_fd_rule *rule;
5712         u32 unused = 0;
5713         u8 action;
5714         int ret;
5715
5716         if (!hnae3_dev_fd_supported(hdev))
5717                 return -EOPNOTSUPP;
5718
5719         if (!hdev->fd_en) {
5720                 dev_warn(&hdev->pdev->dev,
5721                          "Please enable flow director first\n");
5722                 return -EOPNOTSUPP;
5723         }
5724
5725         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5726
5727         ret = hclge_fd_check_spec(hdev, fs, &unused);
5728         if (ret) {
5729                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5730                 return ret;
5731         }
5732
5733         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5734                 action = HCLGE_FD_ACTION_DROP_PACKET;
5735         } else {
5736                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5737                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5738                 u16 tqps;
5739
5740                 if (vf > hdev->num_req_vfs) {
5741                         dev_err(&hdev->pdev->dev,
5742                                 "Error: vf id (%u) > max vf num (%u)\n",
5743                                 vf, hdev->num_req_vfs);
5744                         return -EINVAL;
5745                 }
5746
5747                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5748                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5749
5750                 if (ring >= tqps) {
5751                         dev_err(&hdev->pdev->dev,
5752                                 "Error: queue id (%u) > max tqp num (%u)\n",
5753                                 ring, tqps - 1);
5754                         return -EINVAL;
5755                 }
5756
5757                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5758                 q_index = ring;
5759         }
5760
5761         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5762         if (!rule)
5763                 return -ENOMEM;
5764
5765         ret = hclge_fd_get_tuple(hdev, fs, rule);
5766         if (ret) {
5767                 kfree(rule);
5768                 return ret;
5769         }
5770
5771         rule->flow_type = fs->flow_type;
5772
5773         rule->location = fs->location;
5774         rule->unused_tuple = unused;
5775         rule->vf_id = dst_vport_id;
5776         rule->queue_id = q_index;
5777         rule->action = action;
5778         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5779
5780         /* to avoid rule conflicts, clear all arfs rules before the user
5781          * configures a rule via ethtool
5782          */
5783         hclge_clear_arfs_rules(handle);
5784
5785         spin_lock_bh(&hdev->fd_rule_lock);
5786         ret = hclge_fd_config_rule(hdev, rule);
5787
5788         spin_unlock_bh(&hdev->fd_rule_lock);
5789
5790         return ret;
5791 }
5792
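/* Delete a single flow director rule by location: invalidate the TCAM
 * entry first, then drop the rule from the software list and bitmap.
 */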
5793 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5794                               struct ethtool_rxnfc *cmd)
5795 {
5796         struct hclge_vport *vport = hclge_get_vport(handle);
5797         struct hclge_dev *hdev = vport->back;
5798         struct ethtool_rx_flow_spec *fs;
5799         int ret;
5800
5801         if (!hnae3_dev_fd_supported(hdev))
5802                 return -EOPNOTSUPP;
5803
5804         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5805
5806         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5807                 return -EINVAL;
5808
5809         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5810                 dev_err(&hdev->pdev->dev,
5811                         "Delete fail, rule %u does not exist\n", fs->location);
5812                 return -ENOENT;
5813         }
5814
5815         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5816                                    NULL, false);
5817         if (ret)
5818                 return ret;
5819
5820         spin_lock_bh(&hdev->fd_rule_lock);
5821         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5822
5823         spin_unlock_bh(&hdev->fd_rule_lock);
5824
5825         return ret;
5826 }
5827
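/* Invalidate every active flow director TCAM entry; when clear_list is
 * true, also free the software rule list and reset the rule counters.
 */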
5828 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5829                                      bool clear_list)
5830 {
5831         struct hclge_vport *vport = hclge_get_vport(handle);
5832         struct hclge_dev *hdev = vport->back;
5833         struct hclge_fd_rule *rule;
5834         struct hlist_node *node;
5835         u16 location;
5836
5837         if (!hnae3_dev_fd_supported(hdev))
5838                 return;
5839
5840         spin_lock_bh(&hdev->fd_rule_lock);
5841         for_each_set_bit(location, hdev->fd_bmap,
5842                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5843                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5844                                      NULL, false);
5845
5846         if (clear_list) {
5847                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5848                                           rule_node) {
5849                         hlist_del(&rule->rule_node);
5850                         kfree(rule);
5851                 }
5852                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5853                 hdev->hclge_fd_rule_num = 0;
5854                 bitmap_zero(hdev->fd_bmap,
5855                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5856         }
5857
5858         spin_unlock_bh(&hdev->fd_rule_lock);
5859 }
5860
5861 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5862 {
5863         struct hclge_vport *vport = hclge_get_vport(handle);
5864         struct hclge_dev *hdev = vport->back;
5865         struct hclge_fd_rule *rule;
5866         struct hlist_node *node;
5867         int ret;
5868
5869         /* Return ok here, because reset error handling will check this
5870          * return value. If error is returned here, the reset process will
5871          * fail.
5872          */
5873         if (!hnae3_dev_fd_supported(hdev))
5874                 return 0;
5875
5876         /* if fd is disabled, it should not be restored during reset */
5877         if (!hdev->fd_en)
5878                 return 0;
5879
5880         spin_lock_bh(&hdev->fd_rule_lock);
5881         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5882                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5883                 if (!ret)
5884                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5885
5886                 if (ret) {
5887                         dev_warn(&hdev->pdev->dev,
5888                                  "Restore rule %u failed, remove it\n",
5889                                  rule->location);
5890                         clear_bit(rule->location, hdev->fd_bmap);
5891                         hlist_del(&rule->rule_node);
5892                         kfree(rule);
5893                         hdev->hclge_fd_rule_num--;
5894                 }
5895         }
5896
5897         if (hdev->hclge_fd_rule_num)
5898                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5899
5900         spin_unlock_bh(&hdev->fd_rule_lock);
5901
5902         return 0;
5903 }
5904
5905 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5906                                  struct ethtool_rxnfc *cmd)
5907 {
5908         struct hclge_vport *vport = hclge_get_vport(handle);
5909         struct hclge_dev *hdev = vport->back;
5910
5911         if (!hnae3_dev_fd_supported(hdev))
5912                 return -EOPNOTSUPP;
5913
5914         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5915         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5916
5917         return 0;
5918 }
5919
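/* Fill an ethtool rx flow spec from the stored rule at fs->location.
 * Tuple fields marked unused in the rule are reported with a cleared mask.
 */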
5920 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5921                                   struct ethtool_rxnfc *cmd)
5922 {
5923         struct hclge_vport *vport = hclge_get_vport(handle);
5924         struct hclge_fd_rule *rule = NULL;
5925         struct hclge_dev *hdev = vport->back;
5926         struct ethtool_rx_flow_spec *fs;
5927         struct hlist_node *node2;
5928
5929         if (!hnae3_dev_fd_supported(hdev))
5930                 return -EOPNOTSUPP;
5931
5932         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5933
5934         spin_lock_bh(&hdev->fd_rule_lock);
5935
5936         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5937                 if (rule->location >= fs->location)
5938                         break;
5939         }
5940
5941         if (!rule || fs->location != rule->location) {
5942                 spin_unlock_bh(&hdev->fd_rule_lock);
5943
5944                 return -ENOENT;
5945         }
5946
5947         fs->flow_type = rule->flow_type;
5948         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5949         case SCTP_V4_FLOW:
5950         case TCP_V4_FLOW:
5951         case UDP_V4_FLOW:
5952                 fs->h_u.tcp_ip4_spec.ip4src =
5953                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5954                 fs->m_u.tcp_ip4_spec.ip4src =
5955                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5956                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5957
5958                 fs->h_u.tcp_ip4_spec.ip4dst =
5959                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5960                 fs->m_u.tcp_ip4_spec.ip4dst =
5961                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5962                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5963
5964                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5965                 fs->m_u.tcp_ip4_spec.psrc =
5966                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5967                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5968
5969                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5970                 fs->m_u.tcp_ip4_spec.pdst =
5971                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5972                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5973
5974                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5975                 fs->m_u.tcp_ip4_spec.tos =
5976                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5977                                 0 : rule->tuples_mask.ip_tos;
5978
5979                 break;
5980         case IP_USER_FLOW:
5981                 fs->h_u.usr_ip4_spec.ip4src =
5982                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5983                 fs->m_u.usr_ip4_spec.ip4src =
5984                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5985                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5986
5987                 fs->h_u.usr_ip4_spec.ip4dst =
5988                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5989                 fs->m_u.usr_ip4_spec.ip4dst =
5990                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5991                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5992
5993                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5994                 fs->m_u.usr_ip4_spec.tos =
5995                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5996                                 0 : rule->tuples_mask.ip_tos;
5997
5998                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5999                 fs->m_u.usr_ip4_spec.proto =
6000                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6001                                 0 : rule->tuples_mask.ip_proto;
6002
6003                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
6004
6005                 break;
6006         case SCTP_V6_FLOW:
6007         case TCP_V6_FLOW:
6008         case UDP_V6_FLOW:
6009                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
6010                                   rule->tuples.src_ip, IPV6_SIZE);
6011                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6012                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
6013                                sizeof(int) * IPV6_SIZE);
6014                 else
6015                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
6016                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6017
6018                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6019                                   rule->tuples.dst_ip, IPV6_SIZE);
6020                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6021                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6022                                sizeof(int) * IPV6_SIZE);
6023                 else
6024                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6025                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6026
6027                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6028                 fs->m_u.tcp_ip6_spec.psrc =
6029                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6030                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
6031
6032                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6033                 fs->m_u.tcp_ip6_spec.pdst =
6034                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6035                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6036
6037                 break;
6038         case IPV6_USER_FLOW:
6039                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6040                                   rule->tuples.src_ip, IPV6_SIZE);
6041                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6042                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6043                                sizeof(int) * IPV6_SIZE);
6044                 else
6045                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6046                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6047
6048                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6049                                   rule->tuples.dst_ip, IPV6_SIZE);
6050                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6051                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6052                                sizeof(int) * IPV6_SIZE);
6053                 else
6054                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6055                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6056
6057                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6058                 fs->m_u.usr_ip6_spec.l4_proto =
6059                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6060                                 0 : rule->tuples_mask.ip_proto;
6061
6062                 break;
6063         case ETHER_FLOW:
6064                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6065                                 rule->tuples.src_mac);
6066                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6067                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6068                 else
6069                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6070                                         rule->tuples_mask.src_mac);
6071
6072                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6073                                 rule->tuples.dst_mac);
6074                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6075                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6076                 else
6077                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6078                                         rule->tuples_mask.dst_mac);
6079
6080                 fs->h_u.ether_spec.h_proto =
6081                                 cpu_to_be16(rule->tuples.ether_proto);
6082                 fs->m_u.ether_spec.h_proto =
6083                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6084                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6085
6086                 break;
6087         default:
6088                 spin_unlock_bh(&hdev->fd_rule_lock);
6089                 return -EOPNOTSUPP;
6090         }
6091
6092         if (fs->flow_type & FLOW_EXT) {
6093                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6094                 fs->m_ext.vlan_tci =
6095                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6096                                 cpu_to_be16(VLAN_VID_MASK) :
6097                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6098         }
6099
6100         if (fs->flow_type & FLOW_MAC_EXT) {
6101                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6102                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6103                         eth_zero_addr(fs->m_ext.h_dest);
6104                 else
6105                         ether_addr_copy(fs->m_ext.h_dest,
6106                                         rule->tuples_mask.dst_mac);
6107         }
6108
6109         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6110                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6111         } else {
6112                 u64 vf_id;
6113
6114                 fs->ring_cookie = rule->queue_id;
6115                 vf_id = rule->vf_id;
6116                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6117                 fs->ring_cookie |= vf_id;
6118         }
6119
6120         spin_unlock_bh(&hdev->fd_rule_lock);
6121
6122         return 0;
6123 }
6124
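/* Report the locations of all active rules to ethtool; returns -EMSGSIZE
 * if the caller's rule_locs array is smaller than the number of rules.
 */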
6125 static int hclge_get_all_rules(struct hnae3_handle *handle,
6126                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6127 {
6128         struct hclge_vport *vport = hclge_get_vport(handle);
6129         struct hclge_dev *hdev = vport->back;
6130         struct hclge_fd_rule *rule;
6131         struct hlist_node *node2;
6132         int cnt = 0;
6133
6134         if (!hnae3_dev_fd_supported(hdev))
6135                 return -EOPNOTSUPP;
6136
6137         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6138
6139         spin_lock_bh(&hdev->fd_rule_lock);
6140         hlist_for_each_entry_safe(rule, node2,
6141                                   &hdev->fd_rule_list, rule_node) {
6142                 if (cnt == cmd->rule_cnt) {
6143                         spin_unlock_bh(&hdev->fd_rule_lock);
6144                         return -EMSGSIZE;
6145                 }
6146
6147                 rule_locs[cnt] = rule->location;
6148                 cnt++;
6149         }
6150
6151         spin_unlock_bh(&hdev->fd_rule_lock);
6152
6153         cmd->rule_cnt = cnt;
6154
6155         return 0;
6156 }
6157
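/* Extract the tuple fields used by aRFS (ether proto, L4 proto, dst port
 * and IPv4/IPv6 addresses) from the dissected flow keys.
 */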
6158 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6159                                      struct hclge_fd_rule_tuples *tuples)
6160 {
6161         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6162         tuples->ip_proto = fkeys->basic.ip_proto;
6163         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6164
6165         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6166                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6167                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6168         } else {
6169                 memcpy(tuples->src_ip,
6170                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6171                        sizeof(tuples->src_ip));
6172                 memcpy(tuples->dst_ip,
6173                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6174                        sizeof(tuples->dst_ip));
6175         }
6176 }
6177
6178 /* traverse all rules, check whether an existing rule has the same tuples */
6179 static struct hclge_fd_rule *
6180 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6181                           const struct hclge_fd_rule_tuples *tuples)
6182 {
6183         struct hclge_fd_rule *rule = NULL;
6184         struct hlist_node *node;
6185
6186         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6187                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6188                         return rule;
6189         }
6190
6191         return NULL;
6192 }
6193
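/* Build a flow director rule from aRFS tuples: mask out the fields aRFS
 * does not match on and pick the flow type from the IP/L4 protocol.
 */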
6194 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6195                                      struct hclge_fd_rule *rule)
6196 {
6197         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6198                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6199                              BIT(INNER_SRC_PORT);
6200         rule->action = 0;
6201         rule->vf_id = 0;
6202         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6203         if (tuples->ether_proto == ETH_P_IP) {
6204                 if (tuples->ip_proto == IPPROTO_TCP)
6205                         rule->flow_type = TCP_V4_FLOW;
6206                 else
6207                         rule->flow_type = UDP_V4_FLOW;
6208         } else {
6209                 if (tuples->ip_proto == IPPROTO_TCP)
6210                         rule->flow_type = TCP_V6_FLOW;
6211                 else
6212                         rule->flow_type = UDP_V6_FLOW;
6213         }
6214         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6215         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6216 }
6217
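/* aRFS flow steering entry point: map a dissected flow to a flow director
 * rule. A new rule is allocated for an unknown flow, an existing rule is
 * retargeted when the queue changes, and nothing is done otherwise. aRFS
 * is refused while ethtool-configured rules are active.
 */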
6218 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6219                                       u16 flow_id, struct flow_keys *fkeys)
6220 {
6221         struct hclge_vport *vport = hclge_get_vport(handle);
6222         struct hclge_fd_rule_tuples new_tuples;
6223         struct hclge_dev *hdev = vport->back;
6224         struct hclge_fd_rule *rule;
6225         u16 tmp_queue_id;
6226         u16 bit_id;
6227         int ret;
6228
6229         if (!hnae3_dev_fd_supported(hdev))
6230                 return -EOPNOTSUPP;
6231
6232         memset(&new_tuples, 0, sizeof(new_tuples));
6233         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6234
6235         spin_lock_bh(&hdev->fd_rule_lock);
6236
6237         /* when there is already an fd rule added by the user,
6238          * arfs should not work
6239          */
6240         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6241                 spin_unlock_bh(&hdev->fd_rule_lock);
6242
6243                 return -EOPNOTSUPP;
6244         }
6245
6246         /* check whether a flow director filter already exists for this flow:
6247          * if not, create a new filter for it;
6248          * if a filter exists with a different queue id, modify the filter;
6249          * if a filter exists with the same queue id, do nothing
6250          */
6251         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6252         if (!rule) {
6253                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6254                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6255                         spin_unlock_bh(&hdev->fd_rule_lock);
6256
6257                         return -ENOSPC;
6258                 }
6259
6260                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6261                 if (!rule) {
6262                         spin_unlock_bh(&hdev->fd_rule_lock);
6263
6264                         return -ENOMEM;
6265                 }
6266
6267                 set_bit(bit_id, hdev->fd_bmap);
6268                 rule->location = bit_id;
6269                 rule->flow_id = flow_id;
6270                 rule->queue_id = queue_id;
6271                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6272                 ret = hclge_fd_config_rule(hdev, rule);
6273
6274                 spin_unlock_bh(&hdev->fd_rule_lock);
6275
6276                 if (ret)
6277                         return ret;
6278
6279                 return rule->location;
6280         }
6281
6282         spin_unlock_bh(&hdev->fd_rule_lock);
6283
6284         if (rule->queue_id == queue_id)
6285                 return rule->location;
6286
6287         tmp_queue_id = rule->queue_id;
6288         rule->queue_id = queue_id;
6289         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6290         if (ret) {
6291                 rule->queue_id = tmp_queue_id;
6292                 return ret;
6293         }
6294
6295         return rule->location;
6296 }
6297
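/* Expire aRFS rules whose flows the stack no longer tracks: expired rules
 * are unlinked under fd_rule_lock and their TCAM entries invalidated
 * afterwards, outside the lock.
 */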
6298 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6299 {
6300 #ifdef CONFIG_RFS_ACCEL
6301         struct hnae3_handle *handle = &hdev->vport[0].nic;
6302         struct hclge_fd_rule *rule;
6303         struct hlist_node *node;
6304         HLIST_HEAD(del_list);
6305
6306         spin_lock_bh(&hdev->fd_rule_lock);
6307         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6308                 spin_unlock_bh(&hdev->fd_rule_lock);
6309                 return;
6310         }
6311         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6312                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6313                                         rule->flow_id, rule->location)) {
6314                         hlist_del_init(&rule->rule_node);
6315                         hlist_add_head(&rule->rule_node, &del_list);
6316                         hdev->hclge_fd_rule_num--;
6317                         clear_bit(rule->location, hdev->fd_bmap);
6318                 }
6319         }
6320         spin_unlock_bh(&hdev->fd_rule_lock);
6321
6322         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6323                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6324                                      rule->location, NULL, false);
6325                 kfree(rule);
6326         }
6327 #endif
6328 }
6329
6330 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6331 {
6332 #ifdef CONFIG_RFS_ACCEL
6333         struct hclge_vport *vport = hclge_get_vport(handle);
6334         struct hclge_dev *hdev = vport->back;
6335
6336         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6337                 hclge_del_all_fd_entries(handle, true);
6338 #endif
6339 }
6340
6341 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6342 {
6343         struct hclge_vport *vport = hclge_get_vport(handle);
6344         struct hclge_dev *hdev = vport->back;
6345
6346         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6347                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6348 }
6349
6350 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6351 {
6352         struct hclge_vport *vport = hclge_get_vport(handle);
6353         struct hclge_dev *hdev = vport->back;
6354
6355         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6356 }
6357
6358 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6359 {
6360         struct hclge_vport *vport = hclge_get_vport(handle);
6361         struct hclge_dev *hdev = vport->back;
6362
6363         return hdev->rst_stats.hw_reset_done_cnt;
6364 }
6365
6366 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6367 {
6368         struct hclge_vport *vport = hclge_get_vport(handle);
6369         struct hclge_dev *hdev = vport->back;
6370         bool clear;
6371
6372         hdev->fd_en = enable;
6373         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6374         if (!enable)
6375                 hclge_del_all_fd_entries(handle, clear);
6376         else
6377                 hclge_restore_fd_entries(handle);
6378 }
6379
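/* Enable or disable the MAC: when enabling, turn on TX/RX, padding, FCS
 * insertion/stripping and oversize/undersize handling in one command.
 */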
6380 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6381 {
6382         struct hclge_desc desc;
6383         struct hclge_config_mac_mode_cmd *req =
6384                 (struct hclge_config_mac_mode_cmd *)desc.data;
6385         u32 loop_en = 0;
6386         int ret;
6387
6388         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6389
6390         if (enable) {
6391                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6392                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6393                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6394                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6395                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6396                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6397                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6398                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6399                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6400                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6401         }
6402
6403         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6404
6405         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6406         if (ret)
6407                 dev_err(&hdev->pdev->dev,
6408                         "mac enable fail, ret =%d.\n", ret);
6409 }
6410
6411 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6412                                      u8 switch_param, u8 param_mask)
6413 {
6414         struct hclge_mac_vlan_switch_cmd *req;
6415         struct hclge_desc desc;
6416         u32 func_id;
6417         int ret;
6418
6419         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6420         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6421
6422         /* read current config parameter */
6423         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6424                                    true);
6425         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6426         req->func_id = cpu_to_le32(func_id);
6427
6428         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6429         if (ret) {
6430                 dev_err(&hdev->pdev->dev,
6431                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6432                 return ret;
6433         }
6434
6435         /* modify and write new config parameter */
6436         hclge_cmd_reuse_desc(&desc, false);
6437         req->switch_param = (req->switch_param & param_mask) | switch_param;
6438         req->param_mask = param_mask;
6439
6440         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6441         if (ret)
6442                 dev_err(&hdev->pdev->dev,
6443                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6444         return ret;
6445 }
6446
6447 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6448                                        int link_ret)
6449 {
6450 #define HCLGE_PHY_LINK_STATUS_NUM  200
6451
6452         struct phy_device *phydev = hdev->hw.mac.phydev;
6453         int i = 0;
6454         int ret;
6455
6456         do {
6457                 ret = phy_read_status(phydev);
6458                 if (ret) {
6459                         dev_err(&hdev->pdev->dev,
6460                                 "phy update link status fail, ret = %d\n", ret);
6461                         return;
6462                 }
6463
6464                 if (phydev->link == link_ret)
6465                         break;
6466
6467                 msleep(HCLGE_LINK_STATUS_MS);
6468         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6469 }
6470
6471 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6472 {
6473 #define HCLGE_MAC_LINK_STATUS_NUM  100
6474
6475         int i = 0;
6476         int ret;
6477
6478         do {
6479                 ret = hclge_get_mac_link_status(hdev);
6480                 if (ret < 0)
6481                         return ret;
6482                 else if (ret == link_ret)
6483                         return 0;
6484
6485                 msleep(HCLGE_LINK_STATUS_MS);
6486         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6487         return -EBUSY;
6488 }
6489
6490 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6491                                           bool is_phy)
6492 {
6493 #define HCLGE_LINK_STATUS_DOWN 0
6494 #define HCLGE_LINK_STATUS_UP   1
6495
6496         int link_ret;
6497
6498         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6499
6500         if (is_phy)
6501                 hclge_phy_link_status_wait(hdev, link_ret);
6502
6503         return hclge_mac_link_status_wait(hdev, link_ret);
6504 }
6505
6506 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6507 {
6508         struct hclge_config_mac_mode_cmd *req;
6509         struct hclge_desc desc;
6510         u32 loop_en;
6511         int ret;
6512
6513         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6514         /* 1 Read out the MAC mode config at first */
6515         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6516         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6517         if (ret) {
6518                 dev_err(&hdev->pdev->dev,
6519                         "mac loopback get fail, ret =%d.\n", ret);
6520                 return ret;
6521         }
6522
6523         /* 2 Then setup the loopback flag */
6524         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6525         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6526         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6527         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6528
6529         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6530
6531         /* 3 Config mac work mode with loopback flag
6532          * and its original configuration parameters
6533          */
6534         hclge_cmd_reuse_desc(&desc, false);
6535         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6536         if (ret)
6537                 dev_err(&hdev->pdev->dev,
6538                         "mac loopback set fail, ret =%d.\n", ret);
6539         return ret;
6540 }
6541
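/* Enable or disable serdes loopback through the firmware command, then
 * poll the command result until the DONE bit is set or the retries are
 * exhausted.
 */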
6542 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6543                                      enum hnae3_loop loop_mode)
6544 {
6545 #define HCLGE_SERDES_RETRY_MS   10
6546 #define HCLGE_SERDES_RETRY_NUM  100
6547
6548         struct hclge_serdes_lb_cmd *req;
6549         struct hclge_desc desc;
6550         int ret, i = 0;
6551         u8 loop_mode_b;
6552
6553         req = (struct hclge_serdes_lb_cmd *)desc.data;
6554         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6555
6556         switch (loop_mode) {
6557         case HNAE3_LOOP_SERIAL_SERDES:
6558                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6559                 break;
6560         case HNAE3_LOOP_PARALLEL_SERDES:
6561                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6562                 break;
6563         default:
6564                 dev_err(&hdev->pdev->dev,
6565                         "unsupported serdes loopback mode %d\n", loop_mode);
6566                 return -ENOTSUPP;
6567         }
6568
6569         if (en) {
6570                 req->enable = loop_mode_b;
6571                 req->mask = loop_mode_b;
6572         } else {
6573                 req->mask = loop_mode_b;
6574         }
6575
6576         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6577         if (ret) {
6578                 dev_err(&hdev->pdev->dev,
6579                         "serdes loopback set fail, ret = %d\n", ret);
6580                 return ret;
6581         }
6582
6583         do {
6584                 msleep(HCLGE_SERDES_RETRY_MS);
6585                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6586                                            true);
6587                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6588                 if (ret) {
6589                         dev_err(&hdev->pdev->dev,
6590                                 "serdes loopback get fail, ret = %d\n", ret);
6591                         return ret;
6592                 }
6593         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6594                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6595
6596         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6597                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6598                 return -EBUSY;
6599         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6600                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6601                 return -EIO;
6602         }
6603         return ret;
6604 }
6605
6606 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6607                                      enum hnae3_loop loop_mode)
6608 {
6609         int ret;
6610
6611         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6612         if (ret)
6613                 return ret;
6614
6615         hclge_cfg_mac_mode(hdev, en);
6616
6617         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6618         if (ret)
6619                 dev_err(&hdev->pdev->dev,
6620                         "serdes loopback config mac mode timeout\n");
6621
6622         return ret;
6623 }
6624
6625 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6626                                      struct phy_device *phydev)
6627 {
6628         int ret;
6629
6630         if (!phydev->suspended) {
6631                 ret = phy_suspend(phydev);
6632                 if (ret)
6633                         return ret;
6634         }
6635
6636         ret = phy_resume(phydev);
6637         if (ret)
6638                 return ret;
6639
6640         return phy_loopback(phydev, true);
6641 }
6642
6643 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6644                                       struct phy_device *phydev)
6645 {
6646         int ret;
6647
6648         ret = phy_loopback(phydev, false);
6649         if (ret)
6650                 return ret;
6651
6652         return phy_suspend(phydev);
6653 }
6654
6655 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6656 {
6657         struct phy_device *phydev = hdev->hw.mac.phydev;
6658         int ret;
6659
6660         if (!phydev)
6661                 return -ENOTSUPP;
6662
6663         if (en)
6664                 ret = hclge_enable_phy_loopback(hdev, phydev);
6665         else
6666                 ret = hclge_disable_phy_loopback(hdev, phydev);
6667         if (ret) {
6668                 dev_err(&hdev->pdev->dev,
6669                         "set phy loopback fail, ret = %d\n", ret);
6670                 return ret;
6671         }
6672
6673         hclge_cfg_mac_mode(hdev, en);
6674
6675         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6676         if (ret)
6677                 dev_err(&hdev->pdev->dev,
6678                         "phy loopback config mac mode timeout\n");
6679
6680         return ret;
6681 }
6682
6683 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6684                             int stream_id, bool enable)
6685 {
6686         struct hclge_desc desc;
6687         struct hclge_cfg_com_tqp_queue_cmd *req =
6688                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6689         int ret;
6690
6691         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6692         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6693         req->stream_id = cpu_to_le16(stream_id);
6694         if (enable)
6695                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6696
6697         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6698         if (ret)
6699                 dev_err(&hdev->pdev->dev,
6700                         "Tqp enable fail, status =%d.\n", ret);
6701         return ret;
6702 }
6703
6704 static int hclge_set_loopback(struct hnae3_handle *handle,
6705                               enum hnae3_loop loop_mode, bool en)
6706 {
6707         struct hclge_vport *vport = hclge_get_vport(handle);
6708         struct hnae3_knic_private_info *kinfo;
6709         struct hclge_dev *hdev = vport->back;
6710         int i, ret;
6711
6712         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6713          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6714          * the same, the packets are looped back in the SSU. If SSU loopback
6715          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6716          */
6717         if (hdev->pdev->revision >= 0x21) {
6718                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6719
6720                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6721                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6722                 if (ret)
6723                         return ret;
6724         }
6725
6726         switch (loop_mode) {
6727         case HNAE3_LOOP_APP:
6728                 ret = hclge_set_app_loopback(hdev, en);
6729                 break;
6730         case HNAE3_LOOP_SERIAL_SERDES:
6731         case HNAE3_LOOP_PARALLEL_SERDES:
6732                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6733                 break;
6734         case HNAE3_LOOP_PHY:
6735                 ret = hclge_set_phy_loopback(hdev, en);
6736                 break;
6737         default:
6738                 ret = -ENOTSUPP;
6739                 dev_err(&hdev->pdev->dev,
6740                         "loop_mode %d is not supported\n", loop_mode);
6741                 break;
6742         }
6743
6744         if (ret)
6745                 return ret;
6746
6747         kinfo = &vport->nic.kinfo;
6748         for (i = 0; i < kinfo->num_tqps; i++) {
6749                 ret = hclge_tqp_enable(hdev, i, 0, en);
6750                 if (ret)
6751                         return ret;
6752         }
6753
6754         return 0;
6755 }
6756
6757 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6758 {
6759         int ret;
6760
6761         ret = hclge_set_app_loopback(hdev, false);
6762         if (ret)
6763                 return ret;
6764
6765         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6766         if (ret)
6767                 return ret;
6768
6769         return hclge_cfg_serdes_loopback(hdev, false,
6770                                          HNAE3_LOOP_PARALLEL_SERDES);
6771 }
6772
6773 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6774 {
6775         struct hclge_vport *vport = hclge_get_vport(handle);
6776         struct hnae3_knic_private_info *kinfo;
6777         struct hnae3_queue *queue;
6778         struct hclge_tqp *tqp;
6779         int i;
6780
6781         kinfo = &vport->nic.kinfo;
6782         for (i = 0; i < kinfo->num_tqps; i++) {
6783                 queue = handle->kinfo.tqp[i];
6784                 tqp = container_of(queue, struct hclge_tqp, q);
6785                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6786         }
6787 }
6788
6789 static void hclge_flush_link_update(struct hclge_dev *hdev)
6790 {
6791 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6792
6793         unsigned long last = hdev->serv_processed_cnt;
6794         int i = 0;
6795
6796         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6797                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6798                last == hdev->serv_processed_cnt)
6799                 usleep_range(1, 1);
6800 }
6801
6802 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6803 {
6804         struct hclge_vport *vport = hclge_get_vport(handle);
6805         struct hclge_dev *hdev = vport->back;
6806
6807         if (enable) {
6808                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6809         } else {
6810                 /* Set the DOWN flag here to disable link updating */
6811                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6812
6813                 /* flush memory to make sure DOWN is seen by service task */
6814                 smp_mb__before_atomic();
6815                 hclge_flush_link_update(hdev);
6816         }
6817 }
6818
6819 static int hclge_ae_start(struct hnae3_handle *handle)
6820 {
6821         struct hclge_vport *vport = hclge_get_vport(handle);
6822         struct hclge_dev *hdev = vport->back;
6823
6824         /* mac enable */
6825         hclge_cfg_mac_mode(hdev, true);
6826         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6827         hdev->hw.mac.link = 0;
6828
6829         /* reset tqp stats */
6830         hclge_reset_tqp_stats(handle);
6831
6832         hclge_mac_start_phy(hdev);
6833
6834         return 0;
6835 }
6836
6837 static void hclge_ae_stop(struct hnae3_handle *handle)
6838 {
6839         struct hclge_vport *vport = hclge_get_vport(handle);
6840         struct hclge_dev *hdev = vport->back;
6841         int i;
6842
6843         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6844
6845         hclge_clear_arfs_rules(handle);
6846
6847         /* If it is not a PF reset, the firmware will disable the MAC,
6848          * so we only need to stop the PHY here.
6849          */
6850         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6851             hdev->reset_type != HNAE3_FUNC_RESET) {
6852                 hclge_mac_stop_phy(hdev);
6853                 hclge_update_link_status(hdev);
6854                 return;
6855         }
6856
6857         for (i = 0; i < handle->kinfo.num_tqps; i++)
6858                 hclge_reset_tqp(handle, i);
6859
6860         hclge_config_mac_tnl_int(hdev, false);
6861
6862         /* Mac disable */
6863         hclge_cfg_mac_mode(hdev, false);
6864
6865         hclge_mac_stop_phy(hdev);
6866
6867         /* reset tqp stats */
6868         hclge_reset_tqp_stats(handle);
6869         hclge_update_link_status(hdev);
6870 }
6871
6872 int hclge_vport_start(struct hclge_vport *vport)
6873 {
6874         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6875         vport->last_active_jiffies = jiffies;
6876         return 0;
6877 }
6878
6879 void hclge_vport_stop(struct hclge_vport *vport)
6880 {
6881         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6882 }
6883
6884 static int hclge_client_start(struct hnae3_handle *handle)
6885 {
6886         struct hclge_vport *vport = hclge_get_vport(handle);
6887
6888         return hclge_vport_start(vport);
6889 }
6890
6891 static void hclge_client_stop(struct hnae3_handle *handle)
6892 {
6893         struct hclge_vport *vport = hclge_get_vport(handle);
6894
6895         hclge_vport_stop(vport);
6896 }
6897
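/* Translate the command queue status and the per-operation response code
 * of a MAC/VLAN table command into a standard errno, logging the cause.
 */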
6898 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6899                                          u16 cmdq_resp, u8  resp_code,
6900                                          enum hclge_mac_vlan_tbl_opcode op)
6901 {
6902         struct hclge_dev *hdev = vport->back;
6903
6904         if (cmdq_resp) {
6905                 dev_err(&hdev->pdev->dev,
6906                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6907                         cmdq_resp);
6908                 return -EIO;
6909         }
6910
6911         if (op == HCLGE_MAC_VLAN_ADD) {
6912                 if ((!resp_code) || (resp_code == 1)) {
6913                         return 0;
6914                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6915                         dev_err(&hdev->pdev->dev,
6916                                 "add mac addr failed for uc_overflow.\n");
6917                         return -ENOSPC;
6918                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6919                         dev_err(&hdev->pdev->dev,
6920                                 "add mac addr failed for mc_overflow.\n");
6921                         return -ENOSPC;
6922                 }
6923
6924                 dev_err(&hdev->pdev->dev,
6925                         "add mac addr failed for undefined, code=%u.\n",
6926                         resp_code);
6927                 return -EIO;
6928         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6929                 if (!resp_code) {
6930                         return 0;
6931                 } else if (resp_code == 1) {
6932                         dev_dbg(&hdev->pdev->dev,
6933                                 "remove mac addr failed for miss.\n");
6934                         return -ENOENT;
6935                 }
6936
6937                 dev_err(&hdev->pdev->dev,
6938                         "remove mac addr failed for undefined, code=%u.\n",
6939                         resp_code);
6940                 return -EIO;
6941         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6942                 if (!resp_code) {
6943                         return 0;
6944                 } else if (resp_code == 1) {
6945                         dev_dbg(&hdev->pdev->dev,
6946                                 "lookup mac addr failed for miss.\n");
6947                         return -ENOENT;
6948                 }
6949
6950                 dev_err(&hdev->pdev->dev,
6951                         "lookup mac addr failed for undefined, code=%u.\n",
6952                         resp_code);
6953                 return -EIO;
6954         }
6955
6956         dev_err(&hdev->pdev->dev,
6957                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6958
6959         return -EINVAL;
6960 }
6961
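/* Set or clear the bit for a function id in the MAC/VLAN table
 * descriptors: the first 192 functions live in desc[1], the rest in
 * desc[2], 32 functions per 32-bit data word.
 */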
6962 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6963 {
6964 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6965
6966         unsigned int word_num;
6967         unsigned int bit_num;
6968
6969         if (vfid > 255 || vfid < 0)
6970                 return -EIO;
6971
6972         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6973                 word_num = vfid / 32;
6974                 bit_num  = vfid % 32;
6975                 if (clr)
6976                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6977                 else
6978                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6979         } else {
6980                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6981                 bit_num  = vfid % 32;
6982                 if (clr)
6983                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6984                 else
6985                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6986         }
6987
6988         return 0;
6989 }
6990
6991 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6992 {
6993 #define HCLGE_DESC_NUMBER 3
6994 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6995         int i, j;
6996
6997         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6998                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6999                         if (desc[i].data[j])
7000                                 return false;
7001
7002         return true;
7003 }
7004
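/* Pack a MAC address into the MAC/VLAN table entry layout: bytes 0-3 go
 * into mac_addr_hi32 (byte 0 in the lowest bits) and bytes 4-5 into
 * mac_addr_lo16, with the entry and multicast flags set as needed.
 */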
7005 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7006                                    const u8 *addr, bool is_mc)
7007 {
7008         const unsigned char *mac_addr = addr;
7009         u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
7010                        (mac_addr[2] << 16) | (mac_addr[3] << 24);
7011         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7012
7013         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7014         if (is_mc) {
7015                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7016                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7017         }
7018
7019         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7020         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7021 }
7022
7023 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7024                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7025 {
7026         struct hclge_dev *hdev = vport->back;
7027         struct hclge_desc desc;
7028         u8 resp_code;
7029         u16 retval;
7030         int ret;
7031
7032         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7033
7034         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7035
7036         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7037         if (ret) {
7038                 dev_err(&hdev->pdev->dev,
7039                         "del mac addr failed for cmd_send, ret =%d.\n",
7040                         ret);
7041                 return ret;
7042         }
7043         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7044         retval = le16_to_cpu(desc.retval);
7045
7046         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7047                                              HCLGE_MAC_VLAN_REMOVE);
7048 }
7049
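/* Look up a mac_vlan table entry. Multicast lookups use three chained
 * descriptors so the returned entry can carry the full function bitmap;
 * unicast lookups need only one. The firmware response code is mapped
 * to an errno by hclge_get_mac_vlan_cmd_status().
 */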
7050 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7051                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7052                                      struct hclge_desc *desc,
7053                                      bool is_mc)
7054 {
7055         struct hclge_dev *hdev = vport->back;
7056         u8 resp_code;
7057         u16 retval;
7058         int ret;
7059
7060         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7061         if (is_mc) {
7062                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7063                 memcpy(desc[0].data,
7064                        req,
7065                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7066                 hclge_cmd_setup_basic_desc(&desc[1],
7067                                            HCLGE_OPC_MAC_VLAN_ADD,
7068                                            true);
7069                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7070                 hclge_cmd_setup_basic_desc(&desc[2],
7071                                            HCLGE_OPC_MAC_VLAN_ADD,
7072                                            true);
7073                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7074         } else {
7075                 memcpy(desc[0].data,
7076                        req,
7077                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7078                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7079         }
7080         if (ret) {
7081                 dev_err(&hdev->pdev->dev,
7082                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7083                         ret);
7084                 return ret;
7085         }
7086         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7087         retval = le16_to_cpu(desc[0].retval);
7088
7089         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7090                                              HCLGE_MAC_VLAN_LKUP);
7091 }
7092
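/* Write a mac_vlan table entry. For unicast a single descriptor built
 * here is enough; for multicast the caller passes in the three
 * descriptors returned by the lookup, so the updated function bitmap is
 * written back together with the entry.
 */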
7093 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7094                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7095                                   struct hclge_desc *mc_desc)
7096 {
7097         struct hclge_dev *hdev = vport->back;
7098         int cfg_status;
7099         u8 resp_code;
7100         u16 retval;
7101         int ret;
7102
7103         if (!mc_desc) {
7104                 struct hclge_desc desc;
7105
7106                 hclge_cmd_setup_basic_desc(&desc,
7107                                            HCLGE_OPC_MAC_VLAN_ADD,
7108                                            false);
7109                 memcpy(desc.data, req,
7110                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7111                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7112                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7113                 retval = le16_to_cpu(desc.retval);
7114
7115                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7116                                                            resp_code,
7117                                                            HCLGE_MAC_VLAN_ADD);
7118         } else {
7119                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7120                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7121                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7122                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7123                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7124                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7125                 memcpy(mc_desc[0].data, req,
7126                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7127                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7128                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7129                 retval = le16_to_cpu(mc_desc[0].retval);
7130
7131                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7132                                                            resp_code,
7133                                                            HCLGE_MAC_VLAN_ADD);
7134         }
7135
7136         if (ret) {
7137                 dev_err(&hdev->pdev->dev,
7138                         "add mac addr failed for cmd_send, ret =%d.\n",
7139                         ret);
7140                 return ret;
7141         }
7142
7143         return cfg_status;
7144 }
7145
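/* Request the unicast MAC (UMV) table space wanted by this function and
 * split what was actually granted into a per-vport private quota plus a
 * shared pool used once a vport exhausts its own quota.
 */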
7146 static int hclge_init_umv_space(struct hclge_dev *hdev)
7147 {
7148         u16 allocated_size = 0;
7149         int ret;
7150
7151         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7152                                   true);
7153         if (ret)
7154                 return ret;
7155
7156         if (allocated_size < hdev->wanted_umv_size)
7157                 dev_warn(&hdev->pdev->dev,
7158                          "Alloc umv space failed, want %u, get %u\n",
7159                          hdev->wanted_umv_size, allocated_size);
7160
7161         mutex_init(&hdev->umv_mutex);
7162         hdev->max_umv_size = allocated_size;
7163         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7164          * reserve some unicast mac vlan table entries to be shared by
7165          * the pf and its vfs.
7166          */
7167         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7168         hdev->share_umv_size = hdev->priv_umv_size +
7169                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7170
7171         return 0;
7172 }
7173
7174 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7175 {
7176         int ret;
7177
7178         if (hdev->max_umv_size > 0) {
7179                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7180                                           false);
7181                 if (ret)
7182                         return ret;
7183                 hdev->max_umv_size = 0;
7184         }
7185         mutex_destroy(&hdev->umv_mutex);
7186
7187         return 0;
7188 }
7189
7190 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7191                                u16 *allocated_size, bool is_alloc)
7192 {
7193         struct hclge_umv_spc_alc_cmd *req;
7194         struct hclge_desc desc;
7195         int ret;
7196
7197         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7198         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7199         if (!is_alloc)
7200                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7201
7202         req->space_size = cpu_to_le32(space_size);
7203
7204         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7205         if (ret) {
7206                 dev_err(&hdev->pdev->dev,
7207                         "%s umv space failed for cmd_send, ret =%d\n",
7208                         is_alloc ? "allocate" : "free", ret);
7209                 return ret;
7210         }
7211
7212         if (is_alloc && allocated_size)
7213                 *allocated_size = le32_to_cpu(desc.data[1]);
7214
7215         return 0;
7216 }
7217
7218 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7219 {
7220         struct hclge_vport *vport;
7221         int i;
7222
7223         for (i = 0; i < hdev->num_alloc_vport; i++) {
7224                 vport = &hdev->vport[i];
7225                 vport->used_umv_num = 0;
7226         }
7227
7228         mutex_lock(&hdev->umv_mutex);
7229         hdev->share_umv_size = hdev->priv_umv_size +
7230                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7231         mutex_unlock(&hdev->umv_mutex);
7232 }
7233
7234 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7235 {
7236         struct hclge_dev *hdev = vport->back;
7237         bool is_full;
7238
7239         mutex_lock(&hdev->umv_mutex);
7240         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7241                    hdev->share_umv_size == 0);
7242         mutex_unlock(&hdev->umv_mutex);
7243
7244         return is_full;
7245 }
7246
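/* Account for one UMV entry being allocated or freed: entries beyond the
 * vport's private quota are taken from (or returned to) the shared pool.
 */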
7247 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7248 {
7249         struct hclge_dev *hdev = vport->back;
7250
7251         mutex_lock(&hdev->umv_mutex);
7252         if (is_free) {
7253                 if (vport->used_umv_num > hdev->priv_umv_size)
7254                         hdev->share_umv_size++;
7255
7256                 if (vport->used_umv_num > 0)
7257                         vport->used_umv_num--;
7258         } else {
7259                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7260                     hdev->share_umv_size > 0)
7261                         hdev->share_umv_size--;
7262                 vport->used_umv_num++;
7263         }
7264         mutex_unlock(&hdev->umv_mutex);
7265 }
7266
7267 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7268                              const unsigned char *addr)
7269 {
7270         struct hclge_vport *vport = hclge_get_vport(handle);
7271
7272         return hclge_add_uc_addr_common(vport, addr);
7273 }
7274
7275 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7276                              const unsigned char *addr)
7277 {
7278         struct hclge_dev *hdev = vport->back;
7279         struct hclge_mac_vlan_tbl_entry_cmd req;
7280         struct hclge_desc desc;
7281         u16 egress_port = 0;
7282         int ret;
7283
7284         /* mac addr check */
7285         if (is_zero_ether_addr(addr) ||
7286             is_broadcast_ether_addr(addr) ||
7287             is_multicast_ether_addr(addr)) {
7288                 dev_err(&hdev->pdev->dev,
7289                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7290                          addr, is_zero_ether_addr(addr),
7291                          is_broadcast_ether_addr(addr),
7292                          is_multicast_ether_addr(addr));
7293                 return -EINVAL;
7294         }
7295
7296         memset(&req, 0, sizeof(req));
7297
7298         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7299                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7300
7301         req.egress_port = cpu_to_le16(egress_port);
7302
7303         hclge_prepare_mac_addr(&req, addr, false);
7304
7305         /* Look up the mac address in the mac_vlan table, and add
7306          * it if the entry does not exist. Duplicate unicast entries
7307          * are not allowed in the mac_vlan table.
7308          */
7309         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7310         if (ret == -ENOENT) {
7311                 if (!hclge_is_umv_space_full(vport)) {
7312                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7313                         if (!ret)
7314                                 hclge_update_umv_space(vport, false);
7315                         return ret;
7316                 }
7317
7318                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7319                         hdev->priv_umv_size);
7320
7321                 return -ENOSPC;
7322         }
7323
7324         /* check if we just hit a duplicate entry */
7325         if (!ret) {
7326                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7327                          vport->vport_id, addr);
7328                 return 0;
7329         }
7330
7331         dev_err(&hdev->pdev->dev,
7332                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7333                 addr);
7334
7335         return ret;
7336 }
7337
7338 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7339                             const unsigned char *addr)
7340 {
7341         struct hclge_vport *vport = hclge_get_vport(handle);
7342
7343         return hclge_rm_uc_addr_common(vport, addr);
7344 }
7345
7346 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7347                             const unsigned char *addr)
7348 {
7349         struct hclge_dev *hdev = vport->back;
7350         struct hclge_mac_vlan_tbl_entry_cmd req;
7351         int ret;
7352
7353         /* mac addr check */
7354         if (is_zero_ether_addr(addr) ||
7355             is_broadcast_ether_addr(addr) ||
7356             is_multicast_ether_addr(addr)) {
7357                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7358                         addr);
7359                 return -EINVAL;
7360         }
7361
7362         memset(&req, 0, sizeof(req));
7363         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7364         hclge_prepare_mac_addr(&req, addr, false);
7365         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7366         if (!ret)
7367                 hclge_update_umv_space(vport, true);
7368
7369         return ret;
7370 }
7371
7372 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7373                              const unsigned char *addr)
7374 {
7375         struct hclge_vport *vport = hclge_get_vport(handle);
7376
7377         return hclge_add_mc_addr_common(vport, addr);
7378 }
7379
7380 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7381                              const unsigned char *addr)
7382 {
7383         struct hclge_dev *hdev = vport->back;
7384         struct hclge_mac_vlan_tbl_entry_cmd req;
7385         struct hclge_desc desc[3];
7386         int status;
7387
7388         /* mac addr check */
7389         if (!is_multicast_ether_addr(addr)) {
7390                 dev_err(&hdev->pdev->dev,
7391                         "Add mc mac err! invalid mac:%pM.\n",
7392                          addr);
7393                 return -EINVAL;
7394         }
7395         memset(&req, 0, sizeof(req));
7396         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7397         hclge_prepare_mac_addr(&req, addr, true);
7398         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7399         if (status) {
7400                 /* This mac addr does not exist, add a new entry for it */
7401                 memset(desc[0].data, 0, sizeof(desc[0].data));
7402                 memset(desc[1].data, 0, sizeof(desc[1].data));
7403                 memset(desc[2].data, 0, sizeof(desc[2].data));
7404         }
7405         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7406         if (status)
7407                 return status;
7408         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7409
7410         if (status == -ENOSPC)
7411                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7412
7413         return status;
7414 }
7415
7416 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7417                             const unsigned char *addr)
7418 {
7419         struct hclge_vport *vport = hclge_get_vport(handle);
7420
7421         return hclge_rm_mc_addr_common(vport, addr);
7422 }
7423
7424 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7425                             const unsigned char *addr)
7426 {
7427         struct hclge_dev *hdev = vport->back;
7428         struct hclge_mac_vlan_tbl_entry_cmd req;
7429         enum hclge_cmd_status status;
7430         struct hclge_desc desc[3];
7431
7432         /* mac addr check */
7433         if (!is_multicast_ether_addr(addr)) {
7434                 dev_dbg(&hdev->pdev->dev,
7435                         "Remove mc mac err! invalid mac:%pM.\n",
7436                          addr);
7437                 return -EINVAL;
7438         }
7439
7440         memset(&req, 0, sizeof(req));
7441         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7442         hclge_prepare_mac_addr(&req, addr, true);
7443         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7444         if (!status) {
7445                 /* This mac addr exists, remove this handle's VFID for it */
7446                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7447                 if (status)
7448                         return status;
7449
7450                 if (hclge_is_all_function_id_zero(desc))
7451                         /* All the vfids are zero, so delete this entry */
7452                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7453                 else
7454                         /* Not all the vfids are zero, so only update the vfid bitmap */
7455                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7456
7457         } else {
7458                 /* This mac address may be in the mta table, but it cannot be
7459                  * deleted here because an mta entry represents an address
7460                  * range rather than a specific address. The delete action for
7461                  * all entries takes effect in update_mta_status, called by
7462                  * hns3_nic_set_rx_mode.
7463                  */
7464                 status = 0;
7465         }
7466
7467         return status;
7468 }
7469
7470 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7471                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7472 {
7473         struct hclge_vport_mac_addr_cfg *mac_cfg;
7474         struct list_head *list;
7475
7476         if (!vport->vport_id)
7477                 return;
7478
7479         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7480         if (!mac_cfg)
7481                 return;
7482
7483         mac_cfg->hd_tbl_status = true;
7484         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7485
7486         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7487                &vport->uc_mac_list : &vport->mc_mac_list;
7488
7489         list_add_tail(&mac_cfg->node, list);
7490 }
7491
7492 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7493                               bool is_write_tbl,
7494                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7495 {
7496         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7497         struct list_head *list;
7498         bool uc_flag, mc_flag;
7499
7500         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7501                &vport->uc_mac_list : &vport->mc_mac_list;
7502
7503         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7504         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7505
7506         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7507                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7508                         if (uc_flag && mac_cfg->hd_tbl_status)
7509                                 hclge_rm_uc_addr_common(vport, mac_addr);
7510
7511                         if (mc_flag && mac_cfg->hd_tbl_status)
7512                                 hclge_rm_mc_addr_common(vport, mac_addr);
7513
7514                         list_del(&mac_cfg->node);
7515                         kfree(mac_cfg);
7516                         break;
7517                 }
7518         }
7519 }
7520
7521 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7522                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7523 {
7524         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7525         struct list_head *list;
7526
7527         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7528                &vport->uc_mac_list : &vport->mc_mac_list;
7529
7530         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7531                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7532                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7533
7534                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7535                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7536
7537                 mac_cfg->hd_tbl_status = false;
7538                 if (is_del_list) {
7539                         list_del(&mac_cfg->node);
7540                         kfree(mac_cfg);
7541                 }
7542         }
7543 }
7544
7545 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7546 {
7547         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7548         struct hclge_vport *vport;
7549         int i;
7550
7551         for (i = 0; i < hdev->num_alloc_vport; i++) {
7552                 vport = &hdev->vport[i];
7553                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7554                         list_del(&mac->node);
7555                         kfree(mac);
7556                 }
7557
7558                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7559                         list_del(&mac->node);
7560                         kfree(mac);
7561                 }
7562         }
7563 }
7564
7565 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7566                                               u16 cmdq_resp, u8 resp_code)
7567 {
7568 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7569 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7570 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7571 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7572
7573         int return_status;
7574
7575         if (cmdq_resp) {
7576                 dev_err(&hdev->pdev->dev,
7577                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7578                         cmdq_resp);
7579                 return -EIO;
7580         }
7581
7582         switch (resp_code) {
7583         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7584         case HCLGE_ETHERTYPE_ALREADY_ADD:
7585                 return_status = 0;
7586                 break;
7587         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7588                 dev_err(&hdev->pdev->dev,
7589                         "add mac ethertype failed for manager table overflow.\n");
7590                 return_status = -EIO;
7591                 break;
7592         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7593                 dev_err(&hdev->pdev->dev,
7594                         "add mac ethertype failed for key conflict.\n");
7595                 return_status = -EIO;
7596                 break;
7597         default:
7598                 dev_err(&hdev->pdev->dev,
7599                         "add mac ethertype failed for undefined, code=%u.\n",
7600                         resp_code);
7601                 return_status = -EIO;
7602         }
7603
7604         return return_status;
7605 }
7606
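/* Return true if @mac_addr is already present in the hardware mac_vlan
 * table or is configured for another VF, so the caller can refuse to
 * assign a duplicate address.
 */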
7607 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7608                                      u8 *mac_addr)
7609 {
7610         struct hclge_mac_vlan_tbl_entry_cmd req;
7611         struct hclge_dev *hdev = vport->back;
7612         struct hclge_desc desc;
7613         u16 egress_port = 0;
7614         int i;
7615
7616         if (is_zero_ether_addr(mac_addr))
7617                 return false;
7618
7619         memset(&req, 0, sizeof(req));
7620         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7621                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7622         req.egress_port = cpu_to_le16(egress_port);
7623         hclge_prepare_mac_addr(&req, mac_addr, false);
7624
7625         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7626                 return true;
7627
7628         vf_idx += HCLGE_VF_VPORT_START_NUM;
7629         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7630                 if (i != vf_idx &&
7631                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7632                         return true;
7633
7634         return false;
7635 }
7636
7637 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7638                             u8 *mac_addr)
7639 {
7640         struct hclge_vport *vport = hclge_get_vport(handle);
7641         struct hclge_dev *hdev = vport->back;
7642
7643         vport = hclge_get_vf_vport(hdev, vf);
7644         if (!vport)
7645                 return -EINVAL;
7646
7647         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7648                 dev_info(&hdev->pdev->dev,
7649                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7650                          mac_addr);
7651                 return 0;
7652         }
7653
7654         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7655                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7656                         mac_addr);
7657                 return -EEXIST;
7658         }
7659
7660         ether_addr_copy(vport->vf_info.mac, mac_addr);
7661         dev_info(&hdev->pdev->dev,
7662                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7663                  vf, mac_addr);
7664
7665         return hclge_inform_reset_assert_to_vf(vport);
7666 }
7667
7668 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7669                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7670 {
7671         struct hclge_desc desc;
7672         u8 resp_code;
7673         u16 retval;
7674         int ret;
7675
7676         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7677         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7678
7679         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7680         if (ret) {
7681                 dev_err(&hdev->pdev->dev,
7682                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7683                         ret);
7684                 return ret;
7685         }
7686
7687         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7688         retval = le16_to_cpu(desc.retval);
7689
7690         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7691 }
7692
7693 static int init_mgr_tbl(struct hclge_dev *hdev)
7694 {
7695         int ret;
7696         int i;
7697
7698         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7699                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7700                 if (ret) {
7701                         dev_err(&hdev->pdev->dev,
7702                                 "add mac ethertype failed, ret =%d.\n",
7703                                 ret);
7704                         return ret;
7705                 }
7706         }
7707
7708         return 0;
7709 }
7710
7711 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7712 {
7713         struct hclge_vport *vport = hclge_get_vport(handle);
7714         struct hclge_dev *hdev = vport->back;
7715
7716         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7717 }
7718
7719 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7720                               bool is_first)
7721 {
7722         const unsigned char *new_addr = (const unsigned char *)p;
7723         struct hclge_vport *vport = hclge_get_vport(handle);
7724         struct hclge_dev *hdev = vport->back;
7725         int ret;
7726
7727         /* mac addr check */
7728         if (is_zero_ether_addr(new_addr) ||
7729             is_broadcast_ether_addr(new_addr) ||
7730             is_multicast_ether_addr(new_addr)) {
7731                 dev_err(&hdev->pdev->dev,
7732                         "Change uc mac err! invalid mac:%pM.\n",
7733                          new_addr);
7734                 return -EINVAL;
7735         }
7736
7737         if ((!is_first || is_kdump_kernel()) &&
7738             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7739                 dev_warn(&hdev->pdev->dev,
7740                          "remove old uc mac address fail.\n");
7741
7742         ret = hclge_add_uc_addr(handle, new_addr);
7743         if (ret) {
7744                 dev_err(&hdev->pdev->dev,
7745                         "add uc mac address fail, ret =%d.\n",
7746                         ret);
7747
7748                 if (!is_first &&
7749                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7750                         dev_err(&hdev->pdev->dev,
7751                                 "restore uc mac address fail.\n");
7752
7753                 return -EIO;
7754         }
7755
7756         ret = hclge_pause_addr_cfg(hdev, new_addr);
7757         if (ret) {
7758                 dev_err(&hdev->pdev->dev,
7759                         "configure mac pause address fail, ret =%d.\n",
7760                         ret);
7761                 return -EIO;
7762         }
7763
7764         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7765
7766         return 0;
7767 }
7768
7769 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7770                           int cmd)
7771 {
7772         struct hclge_vport *vport = hclge_get_vport(handle);
7773         struct hclge_dev *hdev = vport->back;
7774
7775         if (!hdev->hw.mac.phydev)
7776                 return -EOPNOTSUPP;
7777
7778         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7779 }
7780
7781 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7782                                       u8 fe_type, bool filter_en, u8 vf_id)
7783 {
7784         struct hclge_vlan_filter_ctrl_cmd *req;
7785         struct hclge_desc desc;
7786         int ret;
7787
7788         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7789
7790         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7791         req->vlan_type = vlan_type;
7792         req->vlan_fe = filter_en ? fe_type : 0;
7793         req->vf_id = vf_id;
7794
7795         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7796         if (ret)
7797                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7798                         ret);
7799
7800         return ret;
7801 }
7802
7803 #define HCLGE_FILTER_TYPE_VF            0
7804 #define HCLGE_FILTER_TYPE_PORT          1
7805 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7806 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7807 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7808 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7809 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7810 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7811                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7812 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7813                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7814
7815 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7816 {
7817         struct hclge_vport *vport = hclge_get_vport(handle);
7818         struct hclge_dev *hdev = vport->back;
7819
7820         if (hdev->pdev->revision >= 0x21) {
7821                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7822                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7823                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7824                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7825         } else {
7826                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7827                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7828                                            0);
7829         }
7830         if (enable)
7831                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7832         else
7833                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7834 }
7835
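/* Add or remove @vlan in the per-function VLAN filter of @vfid. The
 * target function is selected by setting a single bit in the VF bitmap
 * spread over two descriptors; special response codes report a full
 * table (filter disabled by firmware) or a missing entry on delete.
 */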
7836 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7837                                     bool is_kill, u16 vlan,
7838                                     __be16 proto)
7839 {
7840         struct hclge_vport *vport = &hdev->vport[vfid];
7841         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7842         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7843         struct hclge_desc desc[2];
7844         u8 vf_byte_val;
7845         u8 vf_byte_off;
7846         int ret;
7847
7848         /* If the vf vlan table is full, firmware will close the vf vlan
7849          * filter, so it is neither possible nor necessary to add a new
7850          * vlan id to it. If spoof check is enabled and the table is full,
7851          * a new vlan must not be added: tx packets with it would be dropped.
7852          */
7853         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7854                 if (vport->vf_info.spoofchk && vlan) {
7855                         dev_err(&hdev->pdev->dev,
7856                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7857                         return -EPERM;
7858                 }
7859                 return 0;
7860         }
7861
7862         hclge_cmd_setup_basic_desc(&desc[0],
7863                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7864         hclge_cmd_setup_basic_desc(&desc[1],
7865                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7866
7867         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7868
7869         vf_byte_off = vfid / 8;
7870         vf_byte_val = 1 << (vfid % 8);
7871
7872         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7873         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7874
7875         req0->vlan_id  = cpu_to_le16(vlan);
7876         req0->vlan_cfg = is_kill;
7877
7878         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7879                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7880         else
7881                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7882
7883         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7884         if (ret) {
7885                 dev_err(&hdev->pdev->dev,
7886                         "Send vf vlan command fail, ret =%d.\n",
7887                         ret);
7888                 return ret;
7889         }
7890
7891         if (!is_kill) {
7892 #define HCLGE_VF_VLAN_NO_ENTRY  2
7893                 if (!req0->resp_code || req0->resp_code == 1)
7894                         return 0;
7895
7896                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7897                         set_bit(vfid, hdev->vf_vlan_full);
7898                         dev_warn(&hdev->pdev->dev,
7899                                  "vf vlan table is full, vf vlan filter is disabled\n");
7900                         return 0;
7901                 }
7902
7903                 dev_err(&hdev->pdev->dev,
7904                         "Add vf vlan filter fail, ret =%u.\n",
7905                         req0->resp_code);
7906         } else {
7907 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7908                 if (!req0->resp_code)
7909                         return 0;
7910
7911                 /* The vf vlan filter is disabled when the vf vlan table is
7912                  * full, so new vlan ids are not added to the vf vlan table.
7913                  * Just return 0 without a warning to avoid massive verbose
7914                  * logs on unload.
7915                  */
7916                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7917                         return 0;
7918
7919                 dev_err(&hdev->pdev->dev,
7920                         "Kill vf vlan filter fail, ret =%u.\n",
7921                         req0->resp_code);
7922         }
7923
7924         return -EIO;
7925 }
7926
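/* Add or remove @vlan_id in the port (PF level) VLAN filter. The VLAN id
 * space is addressed in windows of HCLGE_VLAN_ID_OFFSET_STEP ids: the
 * command carries the window index and sets the bit for the VLAN inside
 * that window's bitmap.
 */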
7927 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7928                                       u16 vlan_id, bool is_kill)
7929 {
7930         struct hclge_vlan_filter_pf_cfg_cmd *req;
7931         struct hclge_desc desc;
7932         u8 vlan_offset_byte_val;
7933         u8 vlan_offset_byte;
7934         u8 vlan_offset_160;
7935         int ret;
7936
7937         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7938
7939         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7940         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7941                            HCLGE_VLAN_BYTE_SIZE;
7942         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7943
7944         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7945         req->vlan_offset = vlan_offset_160;
7946         req->vlan_cfg = is_kill;
7947         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7948
7949         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7950         if (ret)
7951                 dev_err(&hdev->pdev->dev,
7952                         "port vlan command, send fail, ret =%d.\n", ret);
7953         return ret;
7954 }
7955
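/* Program both VLAN filter levels: update the per-vport filter first,
 * then use hdev->vlan_table[] as a reference bitmap so the port level
 * entry is only added for the first vport using the VLAN and removed
 * when the last one leaves.
 */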
7956 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7957                                     u16 vport_id, u16 vlan_id,
7958                                     bool is_kill)
7959 {
7960         u16 vport_idx, vport_num = 0;
7961         int ret;
7962
7963         if (is_kill && !vlan_id)
7964                 return 0;
7965
7966         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7967                                        proto);
7968         if (ret) {
7969                 dev_err(&hdev->pdev->dev,
7970                         "Set %u vport vlan filter config fail, ret =%d.\n",
7971                         vport_id, ret);
7972                 return ret;
7973         }
7974
7975         /* vlan 0 may be added twice when 8021q module is enabled */
7976         if (!is_kill && !vlan_id &&
7977             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7978                 return 0;
7979
7980         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7981                 dev_err(&hdev->pdev->dev,
7982                         "Add port vlan failed, vport %u is already in vlan %u\n",
7983                         vport_id, vlan_id);
7984                 return -EINVAL;
7985         }
7986
7987         if (is_kill &&
7988             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7989                 dev_err(&hdev->pdev->dev,
7990                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7991                         vport_id, vlan_id);
7992                 return -EINVAL;
7993         }
7994
7995         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7996                 vport_num++;
7997
7998         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7999                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8000                                                  is_kill);
8001
8002         return ret;
8003 }
8004
8005 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8006 {
8007         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8008         struct hclge_vport_vtag_tx_cfg_cmd *req;
8009         struct hclge_dev *hdev = vport->back;
8010         struct hclge_desc desc;
8011         u16 bmap_index;
8012         int status;
8013
8014         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8015
8016         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8017         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8018         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8019         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8020                       vcfg->accept_tag1 ? 1 : 0);
8021         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8022                       vcfg->accept_untag1 ? 1 : 0);
8023         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8024                       vcfg->accept_tag2 ? 1 : 0);
8025         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8026                       vcfg->accept_untag2 ? 1 : 0);
8027         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8028                       vcfg->insert_tag1_en ? 1 : 0);
8029         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8030                       vcfg->insert_tag2_en ? 1 : 0);
8031         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8032
8033         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8034         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8035                         HCLGE_VF_NUM_PER_BYTE;
8036         req->vf_bitmap[bmap_index] =
8037                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8038
8039         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8040         if (status)
8041                 dev_err(&hdev->pdev->dev,
8042                         "Send port txvlan cfg command fail, ret =%d\n",
8043                         status);
8044
8045         return status;
8046 }
8047
8048 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8049 {
8050         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8051         struct hclge_vport_vtag_rx_cfg_cmd *req;
8052         struct hclge_dev *hdev = vport->back;
8053         struct hclge_desc desc;
8054         u16 bmap_index;
8055         int status;
8056
8057         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8058
8059         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8060         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8061                       vcfg->strip_tag1_en ? 1 : 0);
8062         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8063                       vcfg->strip_tag2_en ? 1 : 0);
8064         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8065                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8066         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8067                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8068
8069         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8070         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8071                         HCLGE_VF_NUM_PER_BYTE;
8072         req->vf_bitmap[bmap_index] =
8073                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8074
8075         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8076         if (status)
8077                 dev_err(&hdev->pdev->dev,
8078                         "Send port rxvlan cfg command fail, ret =%d\n",
8079                         status);
8080
8081         return status;
8082 }
8083
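/* Set up TX VLAN insertion and RX VLAN stripping for a vport according
 * to its port based VLAN state (tag1 carries the port VLAN when that
 * state is enabled), then push both configurations to hardware.
 */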
8084 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8085                                   u16 port_base_vlan_state,
8086                                   u16 vlan_tag)
8087 {
8088         int ret;
8089
8090         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8091                 vport->txvlan_cfg.accept_tag1 = true;
8092                 vport->txvlan_cfg.insert_tag1_en = false;
8093                 vport->txvlan_cfg.default_tag1 = 0;
8094         } else {
8095                 vport->txvlan_cfg.accept_tag1 = false;
8096                 vport->txvlan_cfg.insert_tag1_en = true;
8097                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8098         }
8099
8100         vport->txvlan_cfg.accept_untag1 = true;
8101
8102         /* accept_tag2 and accept_untag2 are not supported on
8103          * pdev revision 0x20; newer revisions support them, but
8104          * these two fields cannot be configured by the user.
8105          */
8106         vport->txvlan_cfg.accept_tag2 = true;
8107         vport->txvlan_cfg.accept_untag2 = true;
8108         vport->txvlan_cfg.insert_tag2_en = false;
8109         vport->txvlan_cfg.default_tag2 = 0;
8110
8111         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8112                 vport->rxvlan_cfg.strip_tag1_en = false;
8113                 vport->rxvlan_cfg.strip_tag2_en =
8114                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8115         } else {
8116                 vport->rxvlan_cfg.strip_tag1_en =
8117                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8118                 vport->rxvlan_cfg.strip_tag2_en = true;
8119         }
8120         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8121         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8122
8123         ret = hclge_set_vlan_tx_offload_cfg(vport);
8124         if (ret)
8125                 return ret;
8126
8127         return hclge_set_vlan_rx_offload_cfg(vport);
8128 }
8129
8130 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8131 {
8132         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8133         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8134         struct hclge_desc desc;
8135         int status;
8136
8137         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8138         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8139         rx_req->ot_fst_vlan_type =
8140                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8141         rx_req->ot_sec_vlan_type =
8142                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8143         rx_req->in_fst_vlan_type =
8144                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8145         rx_req->in_sec_vlan_type =
8146                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8147
8148         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8149         if (status) {
8150                 dev_err(&hdev->pdev->dev,
8151                         "Send rxvlan protocol type command fail, ret =%d\n",
8152                         status);
8153                 return status;
8154         }
8155
8156         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8157
8158         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8159         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8160         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8161
8162         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8163         if (status)
8164                 dev_err(&hdev->pdev->dev,
8165                         "Send txvlan protocol type command fail, ret =%d\n",
8166                         status);
8167
8168         return status;
8169 }
8170
8171 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8172 {
8173 #define HCLGE_DEF_VLAN_TYPE             0x8100
8174
8175         struct hnae3_handle *handle = &hdev->vport[0].nic;
8176         struct hclge_vport *vport;
8177         int ret;
8178         int i;
8179
8180         if (hdev->pdev->revision >= 0x21) {
8181                 /* for revision 0x21, vf vlan filter is per function */
8182                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8183                         vport = &hdev->vport[i];
8184                         ret = hclge_set_vlan_filter_ctrl(hdev,
8185                                                          HCLGE_FILTER_TYPE_VF,
8186                                                          HCLGE_FILTER_FE_EGRESS,
8187                                                          true,
8188                                                          vport->vport_id);
8189                         if (ret)
8190                                 return ret;
8191                 }
8192
8193                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8194                                                  HCLGE_FILTER_FE_INGRESS, true,
8195                                                  0);
8196                 if (ret)
8197                         return ret;
8198         } else {
8199                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8200                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8201                                                  true, 0);
8202                 if (ret)
8203                         return ret;
8204         }
8205
8206         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8207
8208         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8209         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8210         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8211         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8212         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8213         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8214
8215         ret = hclge_set_vlan_protocol_type(hdev);
8216         if (ret)
8217                 return ret;
8218
8219         for (i = 0; i < hdev->num_alloc_vport; i++) {
8220                 u16 vlan_tag;
8221
8222                 vport = &hdev->vport[i];
8223                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8224
8225                 ret = hclge_vlan_offload_cfg(vport,
8226                                              vport->port_base_vlan_cfg.state,
8227                                              vlan_tag);
8228                 if (ret)
8229                         return ret;
8230         }
8231
8232         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8233 }
8234
8235 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8236                                        bool written_to_tbl)
8237 {
8238         struct hclge_vport_vlan_cfg *vlan;
8239
8240         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8241         if (!vlan)
8242                 return;
8243
8244         vlan->hd_tbl_status = written_to_tbl;
8245         vlan->vlan_id = vlan_id;
8246
8247         list_add_tail(&vlan->node, &vport->vlan_list);
8248 }
8249
8250 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8251 {
8252         struct hclge_vport_vlan_cfg *vlan, *tmp;
8253         struct hclge_dev *hdev = vport->back;
8254         int ret;
8255
8256         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8257                 if (!vlan->hd_tbl_status) {
8258                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8259                                                        vport->vport_id,
8260                                                        vlan->vlan_id, false);
8261                         if (ret) {
8262                                 dev_err(&hdev->pdev->dev,
8263                                         "restore vport vlan list failed, ret=%d\n",
8264                                         ret);
8265                                 return ret;
8266                         }
8267                 }
8268                 vlan->hd_tbl_status = true;
8269         }
8270
8271         return 0;
8272 }
8273
8274 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8275                                       bool is_write_tbl)
8276 {
8277         struct hclge_vport_vlan_cfg *vlan, *tmp;
8278         struct hclge_dev *hdev = vport->back;
8279
8280         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8281                 if (vlan->vlan_id == vlan_id) {
8282                         if (is_write_tbl && vlan->hd_tbl_status)
8283                                 hclge_set_vlan_filter_hw(hdev,
8284                                                          htons(ETH_P_8021Q),
8285                                                          vport->vport_id,
8286                                                          vlan_id,
8287                                                          true);
8288
8289                         list_del(&vlan->node);
8290                         kfree(vlan);
8291                         break;
8292                 }
8293         }
8294 }
8295
8296 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8297 {
8298         struct hclge_vport_vlan_cfg *vlan, *tmp;
8299         struct hclge_dev *hdev = vport->back;
8300
8301         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8302                 if (vlan->hd_tbl_status)
8303                         hclge_set_vlan_filter_hw(hdev,
8304                                                  htons(ETH_P_8021Q),
8305                                                  vport->vport_id,
8306                                                  vlan->vlan_id,
8307                                                  true);
8308
8309                 vlan->hd_tbl_status = false;
8310                 if (is_del_list) {
8311                         list_del(&vlan->node);
8312                         kfree(vlan);
8313                 }
8314         }
8315 }
8316
8317 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8318 {
8319         struct hclge_vport_vlan_cfg *vlan, *tmp;
8320         struct hclge_vport *vport;
8321         int i;
8322
8323         for (i = 0; i < hdev->num_alloc_vport; i++) {
8324                 vport = &hdev->vport[i];
8325                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8326                         list_del(&vlan->node);
8327                         kfree(vlan);
8328                 }
8329         }
8330 }
8331
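/* Re-program the hardware VLAN filters from the driver's saved state:
 * vports with a port based VLAN get that single entry restored, the
 * others have their saved vlan_list replayed into the filter.
 */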
8332 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8333 {
8334         struct hclge_vport *vport = hclge_get_vport(handle);
8335         struct hclge_vport_vlan_cfg *vlan, *tmp;
8336         struct hclge_dev *hdev = vport->back;
8337         u16 vlan_proto;
8338         u16 state, vlan_id;
8339         int i;
8340
8341         for (i = 0; i < hdev->num_alloc_vport; i++) {
8342                 vport = &hdev->vport[i];
8343                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8344                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8345                 state = vport->port_base_vlan_cfg.state;
8346
8347                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8348                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8349                                                  vport->vport_id, vlan_id,
8350                                                  false);
8351                         continue;
8352                 }
8353
8354                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8355                         int ret;
8356
8357                         if (!vlan->hd_tbl_status)
8358                                 continue;
8359                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8360                                                        vport->vport_id,
8361                                                        vlan->vlan_id, false);
8362                         if (ret)
8363                                 break;
8364                 }
8365         }
8366 }
8367
8368 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8369 {
8370         struct hclge_vport *vport = hclge_get_vport(handle);
8371
8372         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8373                 vport->rxvlan_cfg.strip_tag1_en = false;
8374                 vport->rxvlan_cfg.strip_tag2_en = enable;
8375         } else {
8376                 vport->rxvlan_cfg.strip_tag1_en = enable;
8377                 vport->rxvlan_cfg.strip_tag2_en = true;
8378         }
8379         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8380         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8381         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8382
8383         return hclge_set_vlan_rx_offload_cfg(vport);
8384 }
8385
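/* Summary of the helper below: when switching to port based VLAN, the
 * per-VLAN entries of this vport are dropped from the hardware filter table
 * and the single port VLAN is installed instead; when switching away from
 * it, the old port VLAN is removed and the cached vport VLAN list is written
 * back to hardware.
 */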
8386 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8387                                             u16 port_base_vlan_state,
8388                                             struct hclge_vlan_info *new_info,
8389                                             struct hclge_vlan_info *old_info)
8390 {
8391         struct hclge_dev *hdev = vport->back;
8392         int ret;
8393
8394         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8395                 hclge_rm_vport_all_vlan_table(vport, false);
8396                 return hclge_set_vlan_filter_hw(hdev,
8397                                                  htons(new_info->vlan_proto),
8398                                                  vport->vport_id,
8399                                                  new_info->vlan_tag,
8400                                                  false);
8401         }
8402
8403         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8404                                        vport->vport_id, old_info->vlan_tag,
8405                                        true);
8406         if (ret)
8407                 return ret;
8408
8409         return hclge_add_vport_all_vlan_table(vport);
8410 }
8411
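/* Apply a port based VLAN change for a vport: the VLAN offload is configured
 * for the new state first; for HNAE3_PORT_BASE_VLAN_MODIFY only the hardware
 * filter entry is swapped (new tag added, old tag removed), otherwise the
 * filter entries are rebuilt and port_base_vlan_cfg.state is updated before
 * the cached vlan_info is refreshed.
 */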
8412 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8413                                     struct hclge_vlan_info *vlan_info)
8414 {
8415         struct hnae3_handle *nic = &vport->nic;
8416         struct hclge_vlan_info *old_vlan_info;
8417         struct hclge_dev *hdev = vport->back;
8418         int ret;
8419
8420         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8421
8422         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8423         if (ret)
8424                 return ret;
8425
8426         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8427                 /* add new VLAN tag */
8428                 ret = hclge_set_vlan_filter_hw(hdev,
8429                                                htons(vlan_info->vlan_proto),
8430                                                vport->vport_id,
8431                                                vlan_info->vlan_tag,
8432                                                false);
8433                 if (ret)
8434                         return ret;
8435
8436                 /* remove old VLAN tag */
8437                 ret = hclge_set_vlan_filter_hw(hdev,
8438                                                htons(old_vlan_info->vlan_proto),
8439                                                vport->vport_id,
8440                                                old_vlan_info->vlan_tag,
8441                                                true);
8442                 if (ret)
8443                         return ret;
8444
8445                 goto update;
8446         }
8447
8448         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8449                                                old_vlan_info);
8450         if (ret)
8451                 return ret;
8452
8453         /* update state only when disabling/enabling port based VLAN */
8454         vport->port_base_vlan_cfg.state = state;
8455         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8456                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8457         else
8458                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8459
8460 update:
8461         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8462         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8463         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8464
8465         return 0;
8466 }
8467
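/* Decide how a VF VLAN request changes the port based VLAN state:
 *   state == DISABLE, vlan == 0      -> NOCHANGE
 *   state == DISABLE, vlan != 0      -> ENABLE
 *   otherwise,        vlan == 0      -> DISABLE
 *   otherwise,        vlan unchanged -> NOCHANGE
 *   otherwise,        vlan changed   -> MODIFY
 */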
8468 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8469                                           enum hnae3_port_base_vlan_state state,
8470                                           u16 vlan)
8471 {
8472         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8473                 if (!vlan)
8474                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8475                 else
8476                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8477         } else {
8478                 if (!vlan)
8479                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8480                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8481                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8482                 else
8483                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8484         }
8485 }
8486
8487 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8488                                     u16 vlan, u8 qos, __be16 proto)
8489 {
8490         struct hclge_vport *vport = hclge_get_vport(handle);
8491         struct hclge_dev *hdev = vport->back;
8492         struct hclge_vlan_info vlan_info;
8493         u16 state;
8494         int ret;
8495
8496         if (hdev->pdev->revision == 0x20)
8497                 return -EOPNOTSUPP;
8498
8499         vport = hclge_get_vf_vport(hdev, vfid);
8500         if (!vport)
8501                 return -EINVAL;
8502
8503         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8504         if (vlan > VLAN_N_VID - 1 || qos > 7)
8505                 return -EINVAL;
8506         if (proto != htons(ETH_P_8021Q))
8507                 return -EPROTONOSUPPORT;
8508
8509         state = hclge_get_port_base_vlan_state(vport,
8510                                                vport->port_base_vlan_cfg.state,
8511                                                vlan);
8512         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8513                 return 0;
8514
8515         vlan_info.vlan_tag = vlan;
8516         vlan_info.qos = qos;
8517         vlan_info.vlan_proto = ntohs(proto);
8518
8519         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8520                 return hclge_update_port_base_vlan_cfg(vport, state,
8521                                                        &vlan_info);
8522         } else {
8523                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8524                                                         vport->vport_id, state,
8525                                                         vlan, qos,
8526                                                         ntohs(proto));
8527                 return ret;
8528         }
8529 }
8530
8531 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8532                           u16 vlan_id, bool is_kill)
8533 {
8534         struct hclge_vport *vport = hclge_get_vport(handle);
8535         struct hclge_dev *hdev = vport->back;
8536         bool writen_to_tbl = false;
8537         int ret = 0;
8538
8539         /* When the device is resetting, the firmware is unable to handle
8540          * the mailbox. Just record the VLAN ID and remove it after the
8541          * reset has finished.
8542          */
8543         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8544                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8545                 return -EBUSY;
8546         }
8547
8548         /* When port based VLAN is enabled, we use it as the VLAN filter
8549          * entry. In this case, we don't update the VLAN filter table when
8550          * the user adds a new VLAN or removes an existing one; we just
8551          * update the vport VLAN list. The VLAN IDs in the list are only
8552          * written to the VLAN filter table once port based VLAN is disabled.
8553          */
8554         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8555                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8556                                                vlan_id, is_kill);
8557                 writen_to_tbl = true;
8558         }
8559
8560         if (!ret) {
8561                 if (is_kill)
8562                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8563                 else
8564                         hclge_add_vport_vlan_table(vport, vlan_id,
8565                                                    writen_to_tbl);
8566         } else if (is_kill) {
8567                 /* When removing the hw VLAN filter failed, record the VLAN
8568                  * ID and try to remove it from hw later, to stay consistent
8569                  * with the stack.
8570                  */
8571                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8572         }
8573         return ret;
8574 }
8575
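/* Retry the hardware deletion of VLAN IDs recorded in vlan_del_fail_bmap
 * (deletions that failed earlier, e.g. while a reset was in progress);
 * each bit is cleared once the entry is gone, and at most
 * HCLGE_MAX_SYNC_COUNT entries are handled per call.
 */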
8576 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8577 {
8578 #define HCLGE_MAX_SYNC_COUNT    60
8579
8580         int i, ret, sync_cnt = 0;
8581         u16 vlan_id;
8582
8583         /* start from vport 1 for PF is always alive */
8584         for (i = 0; i < hdev->num_alloc_vport; i++) {
8585                 struct hclge_vport *vport = &hdev->vport[i];
8586
8587                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8588                                          VLAN_N_VID);
8589                 while (vlan_id != VLAN_N_VID) {
8590                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8591                                                        vport->vport_id, vlan_id,
8592                                                        true);
8593                         if (ret && ret != -EINVAL)
8594                                 return;
8595
8596                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8597                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8598
8599                         sync_cnt++;
8600                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8601                                 return;
8602
8603                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8604                                                  VLAN_N_VID);
8605                 }
8606         }
8607 }
8608
8609 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8610 {
8611         struct hclge_config_max_frm_size_cmd *req;
8612         struct hclge_desc desc;
8613
8614         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8615
8616         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8617         req->max_frm_size = cpu_to_le16(new_mps);
8618         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8619
8620         return hclge_cmd_send(&hdev->hw, &desc, 1);
8621 }
8622
8623 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8624 {
8625         struct hclge_vport *vport = hclge_get_vport(handle);
8626
8627         return hclge_set_vport_mtu(vport, new_mtu);
8628 }
8629
8630 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8631 {
8632         struct hclge_dev *hdev = vport->back;
8633         int i, max_frm_size, ret;
8634
8635         /* HW supports 2-layer VLAN */
8636         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
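        /* e.g. for the standard 1500 byte MTU this gives
         * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 * 4 (VLAN_HLEN) = 1526
         */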
8637         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8638             max_frm_size > HCLGE_MAC_MAX_FRAME)
8639                 return -EINVAL;
8640
8641         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8642         mutex_lock(&hdev->vport_lock);
8643         /* VF's mps must fit within hdev->mps */
8644         if (vport->vport_id && max_frm_size > hdev->mps) {
8645                 mutex_unlock(&hdev->vport_lock);
8646                 return -EINVAL;
8647         } else if (vport->vport_id) {
8648                 vport->mps = max_frm_size;
8649                 mutex_unlock(&hdev->vport_lock);
8650                 return 0;
8651         }
8652
8653         /* PF's mps must not be smaller than any VF's mps */
8654         for (i = 1; i < hdev->num_alloc_vport; i++)
8655                 if (max_frm_size < hdev->vport[i].mps) {
8656                         mutex_unlock(&hdev->vport_lock);
8657                         return -EINVAL;
8658                 }
8659
8660         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8661
8662         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8663         if (ret) {
8664                 dev_err(&hdev->pdev->dev,
8665                         "Change mtu fail, ret =%d\n", ret);
8666                 goto out;
8667         }
8668
8669         hdev->mps = max_frm_size;
8670         vport->mps = max_frm_size;
8671
8672         ret = hclge_buffer_alloc(hdev);
8673         if (ret)
8674                 dev_err(&hdev->pdev->dev,
8675                         "Allocate buffer fail, ret =%d\n", ret);
8676
8677 out:
8678         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8679         mutex_unlock(&hdev->vport_lock);
8680         return ret;
8681 }
8682
8683 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8684                                     bool enable)
8685 {
8686         struct hclge_reset_tqp_queue_cmd *req;
8687         struct hclge_desc desc;
8688         int ret;
8689
8690         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8691
8692         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8693         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8694         if (enable)
8695                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8696
8697         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8698         if (ret) {
8699                 dev_err(&hdev->pdev->dev,
8700                         "Send tqp reset cmd error, status =%d\n", ret);
8701                 return ret;
8702         }
8703
8704         return 0;
8705 }
8706
8707 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8708 {
8709         struct hclge_reset_tqp_queue_cmd *req;
8710         struct hclge_desc desc;
8711         int ret;
8712
8713         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8714
8715         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8716         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8717
8718         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8719         if (ret) {
8720                 dev_err(&hdev->pdev->dev,
8721                         "Get reset status error, status =%d\n", ret);
8722                 return ret;
8723         }
8724
8725         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8726 }
8727
8728 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8729 {
8730         struct hnae3_queue *queue;
8731         struct hclge_tqp *tqp;
8732
8733         queue = handle->kinfo.tqp[queue_id];
8734         tqp = container_of(queue, struct hclge_tqp, q);
8735
8736         return tqp->index;
8737 }
8738
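/* Reset a single TQP: disable the queue, request the reset through the
 * firmware, poll the ready_to_reset flag up to HCLGE_TQP_RESET_TRY_TIMES
 * times (sleeping roughly 1ms between polls) and finally deassert the
 * soft reset again.
 */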
8739 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8740 {
8741         struct hclge_vport *vport = hclge_get_vport(handle);
8742         struct hclge_dev *hdev = vport->back;
8743         int reset_try_times = 0;
8744         int reset_status;
8745         u16 queue_gid;
8746         int ret;
8747
8748         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8749
8750         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8751         if (ret) {
8752                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8753                 return ret;
8754         }
8755
8756         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8757         if (ret) {
8758                 dev_err(&hdev->pdev->dev,
8759                         "Send reset tqp cmd fail, ret = %d\n", ret);
8760                 return ret;
8761         }
8762
8763         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8764                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8765                 if (reset_status)
8766                         break;
8767
8768                 /* Wait for tqp hw reset */
8769                 usleep_range(1000, 1200);
8770         }
8771
8772         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8773                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8774                 return ret;
8775         }
8776
8777         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8778         if (ret)
8779                 dev_err(&hdev->pdev->dev,
8780                         "Deassert the soft reset fail, ret = %d\n", ret);
8781
8782         return ret;
8783 }
8784
8785 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8786 {
8787         struct hclge_dev *hdev = vport->back;
8788         int reset_try_times = 0;
8789         int reset_status;
8790         u16 queue_gid;
8791         int ret;
8792
8793         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8794
8795         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8796         if (ret) {
8797                 dev_warn(&hdev->pdev->dev,
8798                          "Send reset tqp cmd fail, ret = %d\n", ret);
8799                 return;
8800         }
8801
8802         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8803                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8804                 if (reset_status)
8805                         break;
8806
8807                 /* Wait for tqp hw reset */
8808                 usleep_range(1000, 1200);
8809         }
8810
8811         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8812                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8813                 return;
8814         }
8815
8816         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8817         if (ret)
8818                 dev_warn(&hdev->pdev->dev,
8819                          "Deassert the soft reset fail, ret = %d\n", ret);
8820 }
8821
8822 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8823 {
8824         struct hclge_vport *vport = hclge_get_vport(handle);
8825         struct hclge_dev *hdev = vport->back;
8826
8827         return hdev->fw_version;
8828 }
8829
8830 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8831 {
8832         struct phy_device *phydev = hdev->hw.mac.phydev;
8833
8834         if (!phydev)
8835                 return;
8836
8837         phy_set_asym_pause(phydev, rx_en, tx_en);
8838 }
8839
8840 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8841 {
8842         int ret;
8843
8844         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8845                 return 0;
8846
8847         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8848         if (ret)
8849                 dev_err(&hdev->pdev->dev,
8850                         "configure pauseparam error, ret = %d.\n", ret);
8851
8852         return ret;
8853 }
8854
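/* Re-resolve MAC pause settings after autoneg on a PHY backed port: combine
 * the local advertisement with what the link partner reported (pause /
 * asym_pause) via mii_resolve_flowctrl_fdx(), and force pause off on half
 * duplex links.
 */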
8855 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8856 {
8857         struct phy_device *phydev = hdev->hw.mac.phydev;
8858         u16 remote_advertising = 0;
8859         u16 local_advertising;
8860         u32 rx_pause, tx_pause;
8861         u8 flowctl;
8862
8863         if (!phydev->link || !phydev->autoneg)
8864                 return 0;
8865
8866         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8867
8868         if (phydev->pause)
8869                 remote_advertising = LPA_PAUSE_CAP;
8870
8871         if (phydev->asym_pause)
8872                 remote_advertising |= LPA_PAUSE_ASYM;
8873
8874         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8875                                            remote_advertising);
8876         tx_pause = flowctl & FLOW_CTRL_TX;
8877         rx_pause = flowctl & FLOW_CTRL_RX;
8878
8879         if (phydev->duplex == HCLGE_MAC_HALF) {
8880                 tx_pause = 0;
8881                 rx_pause = 0;
8882         }
8883
8884         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8885 }
8886
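/* Report the pause settings derived from the current flow control mode:
 * RX_PAUSE -> rx only, TX_PAUSE -> tx only, FULL -> both,
 * PFC or anything else -> neither.
 */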
8887 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8888                                  u32 *rx_en, u32 *tx_en)
8889 {
8890         struct hclge_vport *vport = hclge_get_vport(handle);
8891         struct hclge_dev *hdev = vport->back;
8892         struct phy_device *phydev = hdev->hw.mac.phydev;
8893
8894         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8895
8896         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8897                 *rx_en = 0;
8898                 *tx_en = 0;
8899                 return;
8900         }
8901
8902         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8903                 *rx_en = 1;
8904                 *tx_en = 0;
8905         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8906                 *tx_en = 1;
8907                 *rx_en = 0;
8908         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8909                 *rx_en = 1;
8910                 *tx_en = 1;
8911         } else {
8912                 *rx_en = 0;
8913                 *tx_en = 0;
8914         }
8915 }
8916
8917 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8918                                          u32 rx_en, u32 tx_en)
8919 {
8920         if (rx_en && tx_en)
8921                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8922         else if (rx_en && !tx_en)
8923                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8924         else if (!rx_en && tx_en)
8925                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8926         else
8927                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8928
8929         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8930 }
8931
8932 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8933                                 u32 rx_en, u32 tx_en)
8934 {
8935         struct hclge_vport *vport = hclge_get_vport(handle);
8936         struct hclge_dev *hdev = vport->back;
8937         struct phy_device *phydev = hdev->hw.mac.phydev;
8938         u32 fc_autoneg;
8939
8940         if (phydev) {
8941                 fc_autoneg = hclge_get_autoneg(handle);
8942                 if (auto_neg != fc_autoneg) {
8943                         dev_info(&hdev->pdev->dev,
8944                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8945                         return -EOPNOTSUPP;
8946                 }
8947         }
8948
8949         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8950                 dev_info(&hdev->pdev->dev,
8951                          "Priority flow control enabled. Cannot set link flow control.\n");
8952                 return -EOPNOTSUPP;
8953         }
8954
8955         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8956
8957         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8958
8959         if (!auto_neg)
8960                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8961
8962         if (phydev)
8963                 return phy_start_aneg(phydev);
8964
8965         return -EOPNOTSUPP;
8966 }
8967
8968 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8969                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8970 {
8971         struct hclge_vport *vport = hclge_get_vport(handle);
8972         struct hclge_dev *hdev = vport->back;
8973
8974         if (speed)
8975                 *speed = hdev->hw.mac.speed;
8976         if (duplex)
8977                 *duplex = hdev->hw.mac.duplex;
8978         if (auto_neg)
8979                 *auto_neg = hdev->hw.mac.autoneg;
8980 }
8981
8982 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8983                                  u8 *module_type)
8984 {
8985         struct hclge_vport *vport = hclge_get_vport(handle);
8986         struct hclge_dev *hdev = vport->back;
8987
8988         if (media_type)
8989                 *media_type = hdev->hw.mac.media_type;
8990
8991         if (module_type)
8992                 *module_type = hdev->hw.mac.module_type;
8993 }
8994
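/* Read the MDI/MDI-X control and status from the PHY's MDIX page and map
 * them to the ethtool ETH_TP_MDI* values (control 0x0 -> MDI, 0x1 -> MDI-X,
 * 0x3 -> auto); the copper page is restored before returning.
 */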
8995 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8996                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8997 {
8998         struct hclge_vport *vport = hclge_get_vport(handle);
8999         struct hclge_dev *hdev = vport->back;
9000         struct phy_device *phydev = hdev->hw.mac.phydev;
9001         int mdix_ctrl, mdix, is_resolved;
9002         unsigned int retval;
9003
9004         if (!phydev) {
9005                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9006                 *tp_mdix = ETH_TP_MDI_INVALID;
9007                 return;
9008         }
9009
9010         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9011
9012         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9013         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9014                                     HCLGE_PHY_MDIX_CTRL_S);
9015
9016         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9017         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9018         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9019
9020         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9021
9022         switch (mdix_ctrl) {
9023         case 0x0:
9024                 *tp_mdix_ctrl = ETH_TP_MDI;
9025                 break;
9026         case 0x1:
9027                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9028                 break;
9029         case 0x3:
9030                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9031                 break;
9032         default:
9033                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9034                 break;
9035         }
9036
9037         if (!is_resolved)
9038                 *tp_mdix = ETH_TP_MDI_INVALID;
9039         else if (mdix)
9040                 *tp_mdix = ETH_TP_MDI_X;
9041         else
9042                 *tp_mdix = ETH_TP_MDI;
9043 }
9044
9045 static void hclge_info_show(struct hclge_dev *hdev)
9046 {
9047         struct device *dev = &hdev->pdev->dev;
9048
9049         dev_info(dev, "PF info begin:\n");
9050
9051         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9052         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9053         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9054         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9055         dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
9056         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9057         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9058         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9059         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9060         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9061         dev_info(dev, "This is %s PF\n",
9062                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9063         dev_info(dev, "DCB %s\n",
9064                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9065         dev_info(dev, "MQPRIO %s\n",
9066                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9067
9068         dev_info(dev, "PF info end.\n");
9069 }
9070
9071 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9072                                           struct hclge_vport *vport)
9073 {
9074         struct hnae3_client *client = vport->nic.client;
9075         struct hclge_dev *hdev = ae_dev->priv;
9076         int rst_cnt = hdev->rst_stats.reset_cnt;
9077         int ret;
9078
9079         ret = client->ops->init_instance(&vport->nic);
9080         if (ret)
9081                 return ret;
9082
9083         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9084         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9085             rst_cnt != hdev->rst_stats.reset_cnt) {
9086                 ret = -EBUSY;
9087                 goto init_nic_err;
9088         }
9089
9090         /* Enable nic hw error interrupts */
9091         ret = hclge_config_nic_hw_error(hdev, true);
9092         if (ret) {
9093                 dev_err(&ae_dev->pdev->dev,
9094                         "fail(%d) to enable hw error interrupts\n", ret);
9095                 goto init_nic_err;
9096         }
9097
9098         hnae3_set_client_init_flag(client, ae_dev, 1);
9099
9100         if (netif_msg_drv(&hdev->vport->nic))
9101                 hclge_info_show(hdev);
9102
9103         return ret;
9104
9105 init_nic_err:
9106         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9107         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9108                 msleep(HCLGE_WAIT_RESET_DONE);
9109
9110         client->ops->uninit_instance(&vport->nic, 0);
9111
9112         return ret;
9113 }
9114
9115 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9116                                            struct hclge_vport *vport)
9117 {
9118         struct hnae3_client *client = vport->roce.client;
9119         struct hclge_dev *hdev = ae_dev->priv;
9120         int rst_cnt;
9121         int ret;
9122
9123         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9124             !hdev->nic_client)
9125                 return 0;
9126
9127         client = hdev->roce_client;
9128         ret = hclge_init_roce_base_info(vport);
9129         if (ret)
9130                 return ret;
9131
9132         rst_cnt = hdev->rst_stats.reset_cnt;
9133         ret = client->ops->init_instance(&vport->roce);
9134         if (ret)
9135                 return ret;
9136
9137         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9138         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9139             rst_cnt != hdev->rst_stats.reset_cnt) {
9140                 ret = -EBUSY;
9141                 goto init_roce_err;
9142         }
9143
9144         /* Enable roce ras interrupts */
9145         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9146         if (ret) {
9147                 dev_err(&ae_dev->pdev->dev,
9148                         "fail(%d) to enable roce ras interrupts\n", ret);
9149                 goto init_roce_err;
9150         }
9151
9152         hnae3_set_client_init_flag(client, ae_dev, 1);
9153
9154         return 0;
9155
9156 init_roce_err:
9157         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9158         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9159                 msleep(HCLGE_WAIT_RESET_DONE);
9160
9161         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9162
9163         return ret;
9164 }
9165
9166 static int hclge_init_client_instance(struct hnae3_client *client,
9167                                       struct hnae3_ae_dev *ae_dev)
9168 {
9169         struct hclge_dev *hdev = ae_dev->priv;
9170         struct hclge_vport *vport;
9171         int i, ret;
9172
9173         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9174                 vport = &hdev->vport[i];
9175
9176                 switch (client->type) {
9177                 case HNAE3_CLIENT_KNIC:
9178                         hdev->nic_client = client;
9179                         vport->nic.client = client;
9180                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9181                         if (ret)
9182                                 goto clear_nic;
9183
9184                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9185                         if (ret)
9186                                 goto clear_roce;
9187
9188                         break;
9189                 case HNAE3_CLIENT_ROCE:
9190                         if (hnae3_dev_roce_supported(hdev)) {
9191                                 hdev->roce_client = client;
9192                                 vport->roce.client = client;
9193                         }
9194
9195                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9196                         if (ret)
9197                                 goto clear_roce;
9198
9199                         break;
9200                 default:
9201                         return -EINVAL;
9202                 }
9203         }
9204
9205         return 0;
9206
9207 clear_nic:
9208         hdev->nic_client = NULL;
9209         vport->nic.client = NULL;
9210         return ret;
9211 clear_roce:
9212         hdev->roce_client = NULL;
9213         vport->roce.client = NULL;
9214         return ret;
9215 }
9216
9217 static void hclge_uninit_client_instance(struct hnae3_client *client,
9218                                          struct hnae3_ae_dev *ae_dev)
9219 {
9220         struct hclge_dev *hdev = ae_dev->priv;
9221         struct hclge_vport *vport;
9222         int i;
9223
9224         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9225                 vport = &hdev->vport[i];
9226                 if (hdev->roce_client) {
9227                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9228                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9229                                 msleep(HCLGE_WAIT_RESET_DONE);
9230
9231                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9232                                                                 0);
9233                         hdev->roce_client = NULL;
9234                         vport->roce.client = NULL;
9235                 }
9236                 if (client->type == HNAE3_CLIENT_ROCE)
9237                         return;
9238                 if (hdev->nic_client && client->ops->uninit_instance) {
9239                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9240                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9241                                 msleep(HCLGE_WAIT_RESET_DONE);
9242
9243                         client->ops->uninit_instance(&vport->nic, 0);
9244                         hdev->nic_client = NULL;
9245                         vport->nic.client = NULL;
9246                 }
9247         }
9248 }
9249
9250 static int hclge_pci_init(struct hclge_dev *hdev)
9251 {
9252         struct pci_dev *pdev = hdev->pdev;
9253         struct hclge_hw *hw;
9254         int ret;
9255
9256         ret = pci_enable_device(pdev);
9257         if (ret) {
9258                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9259                 return ret;
9260         }
9261
9262         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9263         if (ret) {
9264                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9265                 if (ret) {
9266                         dev_err(&pdev->dev,
9267                                 "can't set consistent PCI DMA");
9268                         goto err_disable_device;
9269                 }
9270                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9271         }
9272
9273         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9274         if (ret) {
9275                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9276                 goto err_disable_device;
9277         }
9278
9279         pci_set_master(pdev);
9280         hw = &hdev->hw;
9281         hw->io_base = pcim_iomap(pdev, 2, 0);
9282         if (!hw->io_base) {
9283                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9284                 ret = -ENOMEM;
9285                 goto err_clr_master;
9286         }
9287
9288         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9289
9290         return 0;
9291 err_clr_master:
9292         pci_clear_master(pdev);
9293         pci_release_regions(pdev);
9294 err_disable_device:
9295         pci_disable_device(pdev);
9296
9297         return ret;
9298 }
9299
9300 static void hclge_pci_uninit(struct hclge_dev *hdev)
9301 {
9302         struct pci_dev *pdev = hdev->pdev;
9303
9304         pcim_iounmap(pdev, hdev->hw.io_base);
9305         pci_free_irq_vectors(pdev);
9306         pci_clear_master(pdev);
9307         pci_release_mem_regions(pdev);
9308         pci_disable_device(pdev);
9309 }
9310
9311 static void hclge_state_init(struct hclge_dev *hdev)
9312 {
9313         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9314         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9315         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9316         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9317         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9318         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9319         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9320 }
9321
9322 static void hclge_state_uninit(struct hclge_dev *hdev)
9323 {
9324         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9325         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9326
9327         if (hdev->reset_timer.function)
9328                 del_timer_sync(&hdev->reset_timer);
9329         if (hdev->service_task.work.func)
9330                 cancel_delayed_work_sync(&hdev->service_task);
9331 }
9332
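/* FLR preparation: request a function level reset and wait for the down
 * state, polling every HCLGE_FLR_WAIT_MS for at most HCLGE_FLR_WAIT_CNT
 * iterations (about 5 seconds with the values below).
 */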
9333 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9334 {
9335 #define HCLGE_FLR_WAIT_MS       100
9336 #define HCLGE_FLR_WAIT_CNT      50
9337         struct hclge_dev *hdev = ae_dev->priv;
9338         int cnt = 0;
9339
9340         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9341         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9342         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9343         hclge_reset_event(hdev->pdev, NULL);
9344
9345         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9346                cnt++ < HCLGE_FLR_WAIT_CNT)
9347                 msleep(HCLGE_FLR_WAIT_MS);
9348
9349         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9350                 dev_err(&hdev->pdev->dev,
9351                         "flr wait down timeout: %d\n", cnt);
9352 }
9353
9354 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9355 {
9356         struct hclge_dev *hdev = ae_dev->priv;
9357
9358         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9359 }
9360
9361 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9362 {
9363         u16 i;
9364
9365         for (i = 0; i < hdev->num_alloc_vport; i++) {
9366                 struct hclge_vport *vport = &hdev->vport[i];
9367                 int ret;
9368
9369                  /* Send cmd to clear VF's FUNC_RST_ING */
9370                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9371                 if (ret)
9372                         dev_warn(&hdev->pdev->dev,
9373                                  "clear vf(%u) rst failed %d!\n",
9374                                  vport->vport_id, ret);
9375         }
9376 }
9377
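/* PF initialization flow (roughly): PCI and firmware command queue setup,
 * capability query and configuration, MSI/misc IRQ setup, TQP and vport
 * allocation and mapping, MDIO for copper ports, UMV space, MAC, TSO/GRO,
 * VLAN, TM scheduling, RSS, manager table and flow director initialization,
 * then hardware error handling, the reset timer and service task, and
 * enabling the misc vector.
 */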
9378 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9379 {
9380         struct pci_dev *pdev = ae_dev->pdev;
9381         struct hclge_dev *hdev;
9382         int ret;
9383
9384         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9385         if (!hdev) {
9386                 ret = -ENOMEM;
9387                 goto out;
9388         }
9389
9390         hdev->pdev = pdev;
9391         hdev->ae_dev = ae_dev;
9392         hdev->reset_type = HNAE3_NONE_RESET;
9393         hdev->reset_level = HNAE3_FUNC_RESET;
9394         ae_dev->priv = hdev;
9395
9396         /* HW supports 2-layer VLAN */
9397         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9398
9399         mutex_init(&hdev->vport_lock);
9400         spin_lock_init(&hdev->fd_rule_lock);
9401
9402         ret = hclge_pci_init(hdev);
9403         if (ret) {
9404                 dev_err(&pdev->dev, "PCI init failed\n");
9405                 goto out;
9406         }
9407
9408         /* Initialize the firmware command queue */
9409         ret = hclge_cmd_queue_init(hdev);
9410         if (ret) {
9411                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9412                 goto err_pci_uninit;
9413         }
9414
9415         /* Initialize the firmware command interface */
9416         ret = hclge_cmd_init(hdev);
9417         if (ret)
9418                 goto err_cmd_uninit;
9419
9420         ret = hclge_get_cap(hdev);
9421         if (ret) {
9422                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9423                         ret);
9424                 goto err_cmd_uninit;
9425         }
9426
9427         ret = hclge_configure(hdev);
9428         if (ret) {
9429                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9430                 goto err_cmd_uninit;
9431         }
9432
9433         ret = hclge_init_msi(hdev);
9434         if (ret) {
9435                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9436                 goto err_cmd_uninit;
9437         }
9438
9439         ret = hclge_misc_irq_init(hdev);
9440         if (ret) {
9441                 dev_err(&pdev->dev,
9442                         "Misc IRQ(vector0) init error, ret = %d.\n",
9443                         ret);
9444                 goto err_msi_uninit;
9445         }
9446
9447         ret = hclge_alloc_tqps(hdev);
9448         if (ret) {
9449                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9450                 goto err_msi_irq_uninit;
9451         }
9452
9453         ret = hclge_alloc_vport(hdev);
9454         if (ret) {
9455                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9456                 goto err_msi_irq_uninit;
9457         }
9458
9459         ret = hclge_map_tqp(hdev);
9460         if (ret) {
9461                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9462                 goto err_msi_irq_uninit;
9463         }
9464
9465         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9466                 ret = hclge_mac_mdio_config(hdev);
9467                 if (ret) {
9468                         dev_err(&hdev->pdev->dev,
9469                                 "mdio config fail ret=%d\n", ret);
9470                         goto err_msi_irq_uninit;
9471                 }
9472         }
9473
9474         ret = hclge_init_umv_space(hdev);
9475         if (ret) {
9476                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9477                 goto err_mdiobus_unreg;
9478         }
9479
9480         ret = hclge_mac_init(hdev);
9481         if (ret) {
9482                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9483                 goto err_mdiobus_unreg;
9484         }
9485
9486         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9487         if (ret) {
9488                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9489                 goto err_mdiobus_unreg;
9490         }
9491
9492         ret = hclge_config_gro(hdev, true);
9493         if (ret)
9494                 goto err_mdiobus_unreg;
9495
9496         ret = hclge_init_vlan_config(hdev);
9497         if (ret) {
9498                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9499                 goto err_mdiobus_unreg;
9500         }
9501
9502         ret = hclge_tm_schd_init(hdev);
9503         if (ret) {
9504                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9505                 goto err_mdiobus_unreg;
9506         }
9507
9508         hclge_rss_init_cfg(hdev);
9509         ret = hclge_rss_init_hw(hdev);
9510         if (ret) {
9511                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9512                 goto err_mdiobus_unreg;
9513         }
9514
9515         ret = init_mgr_tbl(hdev);
9516         if (ret) {
9517                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9518                 goto err_mdiobus_unreg;
9519         }
9520
9521         ret = hclge_init_fd_config(hdev);
9522         if (ret) {
9523                 dev_err(&pdev->dev,
9524                         "fd table init fail, ret=%d\n", ret);
9525                 goto err_mdiobus_unreg;
9526         }
9527
9528         INIT_KFIFO(hdev->mac_tnl_log);
9529
9530         hclge_dcb_ops_set(hdev);
9531
9532         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9533         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9534
9535         /* Setup affinity after service timer setup because add_timer_on
9536          * is called in affinity notify.
9537          */
9538         hclge_misc_affinity_setup(hdev);
9539
9540         hclge_clear_all_event_cause(hdev);
9541         hclge_clear_resetting_state(hdev);
9542
9543         /* Log and clear the hw errors that have already occurred */
9544         hclge_handle_all_hns_hw_errors(ae_dev);
9545
9546         /* request a delayed reset for error recovery, because an immediate
9547          * global reset on a PF would affect pending initialization of other PFs
9548          */
9549         if (ae_dev->hw_err_reset_req) {
9550                 enum hnae3_reset_type reset_level;
9551
9552                 reset_level = hclge_get_reset_level(ae_dev,
9553                                                     &ae_dev->hw_err_reset_req);
9554                 hclge_set_def_reset_request(ae_dev, reset_level);
9555                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9556         }
9557
9558         /* Enable MISC vector(vector0) */
9559         hclge_enable_vector(&hdev->misc_vector, true);
9560
9561         hclge_state_init(hdev);
9562         hdev->last_reset_time = jiffies;
9563
9564         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9565                  HCLGE_DRIVER_NAME);
9566
9567         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9568
9569         return 0;
9570
9571 err_mdiobus_unreg:
9572         if (hdev->hw.mac.phydev)
9573                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9574 err_msi_irq_uninit:
9575         hclge_misc_irq_uninit(hdev);
9576 err_msi_uninit:
9577         pci_free_irq_vectors(pdev);
9578 err_cmd_uninit:
9579         hclge_cmd_uninit(hdev);
9580 err_pci_uninit:
9581         pcim_iounmap(pdev, hdev->hw.io_base);
9582         pci_clear_master(pdev);
9583         pci_release_regions(pdev);
9584         pci_disable_device(pdev);
9585 out:
9586         return ret;
9587 }
9588
9589 static void hclge_stats_clear(struct hclge_dev *hdev)
9590 {
9591         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9592 }
9593
9594 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9595 {
9596         return hclge_config_switch_param(hdev, vf, enable,
9597                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9598 }
9599
9600 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9601 {
9602         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9603                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9604                                           enable, vf);
9605 }
9606
9607 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9608 {
9609         int ret;
9610
9611         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9612         if (ret) {
9613                 dev_err(&hdev->pdev->dev,
9614                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9615                         vf, enable ? "on" : "off", ret);
9616                 return ret;
9617         }
9618
9619         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9620         if (ret)
9621                 dev_err(&hdev->pdev->dev,
9622                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9623                         vf, enable ? "on" : "off", ret);
9624
9625         return ret;
9626 }
9627
9628 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9629                                  bool enable)
9630 {
9631         struct hclge_vport *vport = hclge_get_vport(handle);
9632         struct hclge_dev *hdev = vport->back;
9633         u32 new_spoofchk = enable ? 1 : 0;
9634         int ret;
9635
9636         if (hdev->pdev->revision == 0x20)
9637                 return -EOPNOTSUPP;
9638
9639         vport = hclge_get_vf_vport(hdev, vf);
9640         if (!vport)
9641                 return -EINVAL;
9642
9643         if (vport->vf_info.spoofchk == new_spoofchk)
9644                 return 0;
9645
9646         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9647                 dev_warn(&hdev->pdev->dev,
9648                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9649                          vf);
9650         else if (enable && hclge_is_umv_space_full(vport))
9651                 dev_warn(&hdev->pdev->dev,
9652                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9653                          vf);
9654
9655         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9656         if (ret)
9657                 return ret;
9658
9659         vport->vf_info.spoofchk = new_spoofchk;
9660         return 0;
9661 }
9662
9663 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9664 {
9665         struct hclge_vport *vport = hdev->vport;
9666         int ret;
9667         int i;
9668
9669         if (hdev->pdev->revision == 0x20)
9670                 return 0;
9671
9672         /* resume the vf spoof check state after reset */
9673         for (i = 0; i < hdev->num_alloc_vport; i++) {
9674                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9675                                                vport->vf_info.spoofchk);
9676                 if (ret)
9677                         return ret;
9678
9679                 vport++;
9680         }
9681
9682         return 0;
9683 }
9684
9685 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9686 {
9687         struct hclge_vport *vport = hclge_get_vport(handle);
9688         struct hclge_dev *hdev = vport->back;
9689         u32 new_trusted = enable ? 1 : 0;
9690         bool en_bc_pmc;
9691         int ret;
9692
9693         vport = hclge_get_vf_vport(hdev, vf);
9694         if (!vport)
9695                 return -EINVAL;
9696
9697         if (vport->vf_info.trusted == new_trusted)
9698                 return 0;
9699
9700         /* Disable promisc mode for VF if it is not trusted any more. */
9701         if (!enable && vport->vf_info.promisc_enable) {
9702                 en_bc_pmc = hdev->pdev->revision != 0x20;
9703                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9704                                                    en_bc_pmc);
9705                 if (ret)
9706                         return ret;
9707                 vport->vf_info.promisc_enable = 0;
9708                 hclge_inform_vf_promisc_info(vport);
9709         }
9710
9711         vport->vf_info.trusted = new_trusted;
9712
9713         return 0;
9714 }
9715
9716 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9717 {
9718         int ret;
9719         int vf;
9720
9721         /* reset vf rate to default value */
9722         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9723                 struct hclge_vport *vport = &hdev->vport[vf];
9724
9725                 vport->vf_info.max_tx_rate = 0;
9726                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9727                 if (ret)
9728                         dev_err(&hdev->pdev->dev,
9729                                 "vf%d failed to reset to default, ret=%d\n",
9730                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9731         }
9732 }
9733
9734 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9735                                      int min_tx_rate, int max_tx_rate)
9736 {
9737         if (min_tx_rate != 0 ||
9738             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9739                 dev_err(&hdev->pdev->dev,
9740                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9741                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9742                 return -EINVAL;
9743         }
9744
9745         return 0;
9746 }
9747
9748 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9749                              int min_tx_rate, int max_tx_rate, bool force)
9750 {
9751         struct hclge_vport *vport = hclge_get_vport(handle);
9752         struct hclge_dev *hdev = vport->back;
9753         int ret;
9754
9755         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9756         if (ret)
9757                 return ret;
9758
9759         vport = hclge_get_vf_vport(hdev, vf);
9760         if (!vport)
9761                 return -EINVAL;
9762
9763         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9764                 return 0;
9765
9766         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9767         if (ret)
9768                 return ret;
9769
9770         vport->vf_info.max_tx_rate = max_tx_rate;
9771
9772         return 0;
9773 }
9774
9775 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9776 {
9777         struct hnae3_handle *handle = &hdev->vport->nic;
9778         struct hclge_vport *vport;
9779         int ret;
9780         int vf;
9781
9782         /* resume the vf max_tx_rate after reset */
9783         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9784                 vport = hclge_get_vf_vport(hdev, vf);
9785                 if (!vport)
9786                         return -EINVAL;
9787
9788                 /* Zero means max rate; after reset, the firmware has already
9789                  * set it to max rate, so just continue.
9790                  */
9791                 if (!vport->vf_info.max_tx_rate)
9792                         continue;
9793
9794                 ret = hclge_set_vf_rate(handle, vf, 0,
9795                                         vport->vf_info.max_tx_rate, true);
9796                 if (ret) {
9797                         dev_err(&hdev->pdev->dev,
9798                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9799                                 vf, vport->vf_info.max_tx_rate, ret);
9800                         return ret;
9801                 }
9802         }
9803
9804         return 0;
9805 }
9806
9807 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9808 {
9809         struct hclge_vport *vport = hdev->vport;
9810         int i;
9811
9812         for (i = 0; i < hdev->num_alloc_vport; i++) {
9813                 hclge_vport_stop(vport);
9814                 vport++;
9815         }
9816 }
9817
9818 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9819 {
9820         struct hclge_dev *hdev = ae_dev->priv;
9821         struct pci_dev *pdev = ae_dev->pdev;
9822         int ret;
9823
9824         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9825
9826         hclge_stats_clear(hdev);
9827         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9828         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9829
9830         ret = hclge_cmd_init(hdev);
9831         if (ret) {
9832                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9833                 return ret;
9834         }
9835
9836         ret = hclge_map_tqp(hdev);
9837         if (ret) {
9838                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9839                 return ret;
9840         }
9841
9842         hclge_reset_umv_space(hdev);
9843
9844         ret = hclge_mac_init(hdev);
9845         if (ret) {
9846                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9847                 return ret;
9848         }
9849
9850         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9851         if (ret) {
9852                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9853                 return ret;
9854         }
9855
9856         ret = hclge_config_gro(hdev, true);
9857         if (ret)
9858                 return ret;
9859
9860         ret = hclge_init_vlan_config(hdev);
9861         if (ret) {
9862                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9863                 return ret;
9864         }
9865
9866         ret = hclge_tm_init_hw(hdev, true);
9867         if (ret) {
9868                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9869                 return ret;
9870         }
9871
9872         ret = hclge_rss_init_hw(hdev);
9873         if (ret) {
9874                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
9875                 return ret;
9876         }
9877
9878         ret = hclge_init_fd_config(hdev);
9879         if (ret) {
9880                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9881                 return ret;
9882         }
9883
9884         /* Log and clear the hw errors that have already occurred */
9885         hclge_handle_all_hns_hw_errors(ae_dev);
9886
9887         /* Re-enable the hw error interrupts because
9888          * the interrupts get disabled on global reset.
9889          */
9890         ret = hclge_config_nic_hw_error(hdev, true);
9891         if (ret) {
9892                 dev_err(&pdev->dev,
9893                         "fail(%d) to re-enable NIC hw error interrupts\n",
9894                         ret);
9895                 return ret;
9896         }
9897
9898         if (hdev->roce_client) {
9899                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9900                 if (ret) {
9901                         dev_err(&pdev->dev,
9902                                 "fail(%d) to re-enable roce ras interrupts\n",
9903                                 ret);
9904                         return ret;
9905                 }
9906         }
9907
9908         hclge_reset_vport_state(hdev);
9909         ret = hclge_reset_vport_spoofchk(hdev);
9910         if (ret)
9911                 return ret;
9912
9913         ret = hclge_resume_vf_rate(hdev);
9914         if (ret)
9915                 return ret;
9916
9917         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9918                  HCLGE_DRIVER_NAME);
9919
9920         return 0;
9921 }
9922
9923 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9924 {
9925         struct hclge_dev *hdev = ae_dev->priv;
9926         struct hclge_mac *mac = &hdev->hw.mac;
9927
9928         hclge_reset_vf_rate(hdev);
9929         hclge_misc_affinity_teardown(hdev);
9930         hclge_state_uninit(hdev);
9931
9932         if (mac->phydev)
9933                 mdiobus_unregister(mac->mdio_bus);
9934
9935         hclge_uninit_umv_space(hdev);
9936
9937         /* Disable MISC vector(vector0) */
9938         hclge_enable_vector(&hdev->misc_vector, false);
9939         synchronize_irq(hdev->misc_vector.vector_irq);
9940
9941         /* Disable all hw interrupts */
9942         hclge_config_mac_tnl_int(hdev, false);
9943         hclge_config_nic_hw_error(hdev, false);
9944         hclge_config_rocee_ras_interrupt(hdev, false);
9945
9946         hclge_cmd_uninit(hdev);
9947         hclge_misc_irq_uninit(hdev);
9948         hclge_pci_uninit(hdev);
9949         mutex_destroy(&hdev->vport_lock);
9950         hclge_uninit_vport_mac_table(hdev);
9951         hclge_uninit_vport_vlan_table(hdev);
9952         ae_dev->priv = NULL;
9953 }
9954
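     /* The maximum number of combined channels is limited both by the RSS
      * capability and by the TQPs available per TC on this vport.
      */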
9955 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9956 {
9957         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9958         struct hclge_vport *vport = hclge_get_vport(handle);
9959         struct hclge_dev *hdev = vport->back;
9960
9961         return min_t(u32, hdev->rss_size_max,
9962                      vport->alloc_tqps / kinfo->num_tc);
9963 }
9964
9965 static void hclge_get_channels(struct hnae3_handle *handle,
9966                                struct ethtool_channels *ch)
9967 {
9968         ch->max_combined = hclge_get_max_channels(handle);
9969         ch->other_count = 1;
9970         ch->max_other = 1;
9971         ch->combined_count = handle->kinfo.rss_size;
9972 }
9973
9974 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9975                                         u16 *alloc_tqps, u16 *max_rss_size)
9976 {
9977         struct hclge_vport *vport = hclge_get_vport(handle);
9978         struct hclge_dev *hdev = vport->back;
9979
9980         *alloc_tqps = vport->alloc_tqps;
9981         *max_rss_size = hdev->rss_size_max;
9982 }
9983
9984 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9985                               bool rxfh_configured)
9986 {
9987         struct hclge_vport *vport = hclge_get_vport(handle);
9988         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9989         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9990         struct hclge_dev *hdev = vport->back;
9991         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9992         u16 cur_rss_size = kinfo->rss_size;
9993         u16 cur_tqps = kinfo->num_tqps;
9994         u16 tc_valid[HCLGE_MAX_TC_NUM];
9995         u16 roundup_size;
9996         u32 *rss_indir;
9997         unsigned int i;
9998         int ret;
9999
10000         kinfo->req_rss_size = new_tqps_num;
10001
10002         ret = hclge_tm_vport_map_update(hdev);
10003         if (ret) {
10004                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
10005                 return ret;
10006         }
10007
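              /* tc_size is expressed as a log2 value, so round the new
               * rss_size up to a power of two before converting it.
               */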
10008         roundup_size = roundup_pow_of_two(kinfo->rss_size);
10009         roundup_size = ilog2(roundup_size);
10010         /* Set the RSS TC mode according to the new RSS size */
10011         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10012                 tc_valid[i] = 0;
10013
10014                 if (!(hdev->hw_tc_map & BIT(i)))
10015                         continue;
10016
10017                 tc_valid[i] = 1;
10018                 tc_size[i] = roundup_size;
10019                 tc_offset[i] = kinfo->rss_size * i;
10020         }
10021         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10022         if (ret)
10023                 return ret;
10024
10025         /* RSS indirection table has been configured by user */
10026         if (rxfh_configured)
10027                 goto out;
10028
10029         /* Reinitialize the RSS indirection table according to the new RSS size */
10030         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10031         if (!rss_indir)
10032                 return -ENOMEM;
10033
10034         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10035                 rss_indir[i] = i % kinfo->rss_size;
10036
10037         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10038         if (ret)
10039                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10040                         ret);
10041
10042         kfree(rss_indir);
10043
10044 out:
10045         if (!ret)
10046                 dev_info(&hdev->pdev->dev,
10047                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10048                          cur_rss_size, kinfo->rss_size,
10049                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10050
10051         return ret;
10052 }
10053
10054 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10055                               u32 *regs_num_64_bit)
10056 {
10057         struct hclge_desc desc;
10058         u32 total_num;
10059         int ret;
10060
10061         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10062         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10063         if (ret) {
10064                 dev_err(&hdev->pdev->dev,
10065                         "Query register number cmd failed, ret = %d.\n", ret);
10066                 return ret;
10067         }
10068
10069         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10070         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10071
10072         total_num = *regs_num_32_bit + *regs_num_64_bit;
10073         if (!total_num)
10074                 return -EINVAL;
10075
10076         return 0;
10077 }
10078
10079 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10080                                  void *data)
10081 {
10082 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10083 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10084
10085         struct hclge_desc *desc;
10086         u32 *reg_val = data;
10087         __le32 *desc_data;
10088         int nodata_num;
10089         int cmd_num;
10090         int i, k, n;
10091         int ret;
10092
10093         if (regs_num == 0)
10094                 return 0;
10095
10096         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10097         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10098                                HCLGE_32_BIT_REG_RTN_DATANUM);
10099         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10100         if (!desc)
10101                 return -ENOMEM;
10102
10103         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10104         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10105         if (ret) {
10106                 dev_err(&hdev->pdev->dev,
10107                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10108                 kfree(desc);
10109                 return ret;
10110         }
10111
10112         for (i = 0; i < cmd_num; i++) {
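                      /* The first descriptor keeps its command header, so only
                       * its data words hold register values; the remaining
                       * descriptors are used as raw data buffers in full.
                       */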
10113                 if (i == 0) {
10114                         desc_data = (__le32 *)(&desc[i].data[0]);
10115                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10116                 } else {
10117                         desc_data = (__le32 *)(&desc[i]);
10118                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10119                 }
10120                 for (k = 0; k < n; k++) {
10121                         *reg_val++ = le32_to_cpu(*desc_data++);
10122
10123                         regs_num--;
10124                         if (!regs_num)
10125                                 break;
10126                 }
10127         }
10128
10129         kfree(desc);
10130         return 0;
10131 }
10132
10133 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10134                                  void *data)
10135 {
10136 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10137 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10138
10139         struct hclge_desc *desc;
10140         u64 *reg_val = data;
10141         __le64 *desc_data;
10142         int nodata_len;
10143         int cmd_num;
10144         int i, k, n;
10145         int ret;
10146
10147         if (regs_num == 0)
10148                 return 0;
10149
10150         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10151         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10152                                HCLGE_64_BIT_REG_RTN_DATANUM);
10153         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10154         if (!desc)
10155                 return -ENOMEM;
10156
10157         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10158         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10159         if (ret) {
10160                 dev_err(&hdev->pdev->dev,
10161                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10162                 kfree(desc);
10163                 return ret;
10164         }
10165
10166         for (i = 0; i < cmd_num; i++) {
10167                 if (i == 0) {
10168                         desc_data = (__le64 *)(&desc[i].data[0]);
10169                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10170                 } else {
10171                         desc_data = (__le64 *)(&desc[i]);
10172                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10173                 }
10174                 for (k = 0; k < n; k++) {
10175                         *reg_val++ = le64_to_cpu(*desc_data++);
10176
10177                         regs_num--;
10178                         if (!regs_num)
10179                                 break;
10180                 }
10181         }
10182
10183         kfree(desc);
10184         return 0;
10185 }
10186
10187 #define MAX_SEPARATE_NUM        4
10188 #define SEPARATOR_VALUE         0xFDFCFBFA
10189 #define REG_NUM_PER_LINE        4
10190 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10191 #define REG_SEPARATOR_LINE      1
10192 #define REG_NUM_REMAIN_MASK     3
10193 #define BD_LIST_MAX_NUM         30
10194
10195 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10196 {
10197         /* Prepare 4 commands to query DFX BD number */
10198         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10199         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10200         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10201         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10202         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10203         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10204         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10205
10206         return hclge_cmd_send(&hdev->hw, desc, 4);
10207 }
10208
10209 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10210                                     int *bd_num_list,
10211                                     u32 type_num)
10212 {
10213 #define HCLGE_DFX_REG_BD_NUM    4
10214
10215         u32 entries_per_desc, desc_index, index, offset, i;
10216         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10217         int ret;
10218
10219         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10220         if (ret) {
10221                 dev_err(&hdev->pdev->dev,
10222                         "Get dfx bd num fail, status is %d.\n", ret);
10223                 return ret;
10224         }
10225
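              /* Each DFX block reports its BD count as one u32 in the reply;
               * hclge_dfx_bd_offset_list maps a block to its word offset
               * across the four descriptors.
               */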
10226         entries_per_desc = ARRAY_SIZE(desc[0].data);
10227         for (i = 0; i < type_num; i++) {
10228                 offset = hclge_dfx_bd_offset_list[i];
10229                 index = offset % entries_per_desc;
10230                 desc_index = offset / entries_per_desc;
10231                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10232         }
10233
10234         return ret;
10235 }
10236
10237 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10238                                   struct hclge_desc *desc_src, int bd_num,
10239                                   enum hclge_opcode_type cmd)
10240 {
10241         struct hclge_desc *desc = desc_src;
10242         int i, ret;
10243
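              /* Chain bd_num descriptors into a single query: every
               * descriptor except the last carries the NEXT flag.
               */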
10244         hclge_cmd_setup_basic_desc(desc, cmd, true);
10245         for (i = 0; i < bd_num - 1; i++) {
10246                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10247                 desc++;
10248                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10249         }
10250
10251         desc = desc_src;
10252         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10253         if (ret)
10254                 dev_err(&hdev->pdev->dev,
10255                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10256                         cmd, ret);
10257
10258         return ret;
10259 }
10260
10261 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10262                                     void *data)
10263 {
10264         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10265         struct hclge_desc *desc = desc_src;
10266         u32 *reg = data;
10267
10268         entries_per_desc = ARRAY_SIZE(desc->data);
10269         reg_num = entries_per_desc * bd_num;
10270         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10271         for (i = 0; i < reg_num; i++) {
10272                 index = i % entries_per_desc;
10273                 desc_index = i / entries_per_desc;
10274                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10275         }
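              /* Pad with separator words so the block always ends on a
               * REG_NUM_PER_LINE boundary in the dump.
               */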
10276         for (i = 0; i < separator_num; i++)
10277                 *reg++ = SEPARATOR_VALUE;
10278
10279         return reg_num + separator_num;
10280 }
10281
10282 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10283 {
10284         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10285         int data_len_per_desc, data_len, bd_num, i;
10286         int bd_num_list[BD_LIST_MAX_NUM];
10287         int ret;
10288
10289         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10290         if (ret) {
10291                 dev_err(&hdev->pdev->dev,
10292                         "Get dfx reg bd num fail, status is %d.\n", ret);
10293                 return ret;
10294         }
10295
10296         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10297         *len = 0;
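              /* Reserve whole dump lines per block: register data plus its
               * separator padding always ends on a REG_LEN_PER_LINE boundary.
               */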
10298         for (i = 0; i < dfx_reg_type_num; i++) {
10299                 bd_num = bd_num_list[i];
10300                 data_len = data_len_per_desc * bd_num;
10301                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10302         }
10303
10304         return ret;
10305 }
10306
10307 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10308 {
10309         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10310         int bd_num, bd_num_max, buf_len, i;
10311         int bd_num_list[BD_LIST_MAX_NUM];
10312         struct hclge_desc *desc_src;
10313         u32 *reg = data;
10314         int ret;
10315
10316         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10317         if (ret) {
10318                 dev_err(&hdev->pdev->dev,
10319                         "Get dfx reg bd num fail, status is %d.\n", ret);
10320                 return ret;
10321         }
10322
10323         bd_num_max = bd_num_list[0];
10324         for (i = 1; i < dfx_reg_type_num; i++)
10325                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10326
10327         buf_len = sizeof(*desc_src) * bd_num_max;
10328         desc_src = kzalloc(buf_len, GFP_KERNEL);
10329         if (!desc_src) {
10330                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10331                 return -ENOMEM;
10332         }
10333
10334         for (i = 0; i < dfx_reg_type_num; i++) {
10335                 bd_num = bd_num_list[i];
10336                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10337                                              hclge_dfx_reg_opcode_list[i]);
10338                 if (ret) {
10339                         dev_err(&hdev->pdev->dev,
10340                                 "Get dfx reg fail, status is %d.\n", ret);
10341                         break;
10342                 }
10343
10344                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10345         }
10346
10347         kfree(desc_src);
10348         return ret;
10349 }
10350
10351 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10352                               struct hnae3_knic_private_info *kinfo)
10353 {
10354 #define HCLGE_RING_REG_OFFSET           0x200
10355 #define HCLGE_RING_INT_REG_OFFSET       0x4
10356
10357         int i, j, reg_num, separator_num;
10358         int data_num_sum;
10359         u32 *reg = data;
10360
10361         /* fetch per-PF register values from the PF PCIe register space */
10362         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10363         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10364         for (i = 0; i < reg_num; i++)
10365                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10366         for (i = 0; i < separator_num; i++)
10367                 *reg++ = SEPARATOR_VALUE;
10368         data_num_sum = reg_num + separator_num;
10369
10370         reg_num = ARRAY_SIZE(common_reg_addr_list);
10371         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10372         for (i = 0; i < reg_num; i++)
10373                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10374         for (i = 0; i < separator_num; i++)
10375                 *reg++ = SEPARATOR_VALUE;
10376         data_num_sum += reg_num + separator_num;
10377
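              /* Ring registers are replicated per TQP at a 0x200 byte stride. */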
10378         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10379         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10380         for (j = 0; j < kinfo->num_tqps; j++) {
10381                 for (i = 0; i < reg_num; i++)
10382                         *reg++ = hclge_read_dev(&hdev->hw,
10383                                                 ring_reg_addr_list[i] +
10384                                                 HCLGE_RING_REG_OFFSET * j);
10385                 for (i = 0; i < separator_num; i++)
10386                         *reg++ = SEPARATOR_VALUE;
10387         }
10388         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10389
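              /* TQP interrupt registers repeat at a 4 byte stride; one copy
               * is dumped for each of the num_msi_used - 1 TQP vectors.
               */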
10390         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10391         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10392         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10393                 for (i = 0; i < reg_num; i++)
10394                         *reg++ = hclge_read_dev(&hdev->hw,
10395                                                 tqp_intr_reg_addr_list[i] +
10396                                                 HCLGE_RING_INT_REG_OFFSET * j);
10397                 for (i = 0; i < separator_num; i++)
10398                         *reg++ = SEPARATOR_VALUE;
10399         }
10400         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10401
10402         return data_num_sum;
10403 }
10404
10405 static int hclge_get_regs_len(struct hnae3_handle *handle)
10406 {
10407         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10408         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10409         struct hclge_vport *vport = hclge_get_vport(handle);
10410         struct hclge_dev *hdev = vport->back;
10411         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10412         int regs_lines_32_bit, regs_lines_64_bit;
10413         int ret;
10414
10415         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10416         if (ret) {
10417                 dev_err(&hdev->pdev->dev,
10418                         "Get register number failed, ret = %d.\n", ret);
10419                 return ret;
10420         }
10421
10422         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10423         if (ret) {
10424                 dev_err(&hdev->pdev->dev,
10425                         "Get dfx reg len failed, ret = %d.\n", ret);
10426                 return ret;
10427         }
10428
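              /* Each fixed register block is reported in whole lines of
               * REG_LEN_PER_LINE bytes plus one separator line; the DFX
               * blocks are sized separately via dfx_regs_len.
               */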
10429         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10430                 REG_SEPARATOR_LINE;
10431         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10432                 REG_SEPARATOR_LINE;
10433         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10434                 REG_SEPARATOR_LINE;
10435         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10436                 REG_SEPARATOR_LINE;
10437         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10438                 REG_SEPARATOR_LINE;
10439         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10440                 REG_SEPARATOR_LINE;
10441
10442         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10443                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10444                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10445 }
10446
10447 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10448                            void *data)
10449 {
10450         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10451         struct hclge_vport *vport = hclge_get_vport(handle);
10452         struct hclge_dev *hdev = vport->back;
10453         u32 regs_num_32_bit, regs_num_64_bit;
10454         int i, reg_num, separator_num, ret;
10455         u32 *reg = data;
10456
10457         *version = hdev->fw_version;
10458
10459         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10460         if (ret) {
10461                 dev_err(&hdev->pdev->dev,
10462                         "Get register number failed, ret = %d.\n", ret);
10463                 return;
10464         }
10465
10466         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10467
10468         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10469         if (ret) {
10470                 dev_err(&hdev->pdev->dev,
10471                         "Get 32 bit register failed, ret = %d.\n", ret);
10472                 return;
10473         }
10474         reg_num = regs_num_32_bit;
10475         reg += reg_num;
10476         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10477         for (i = 0; i < separator_num; i++)
10478                 *reg++ = SEPARATOR_VALUE;
10479
10480         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10481         if (ret) {
10482                 dev_err(&hdev->pdev->dev,
10483                         "Get 64 bit register failed, ret = %d.\n", ret);
10484                 return;
10485         }
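              /* Each 64 bit register occupies two u32 slots in the dump buffer. */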
10486         reg_num = regs_num_64_bit * 2;
10487         reg += reg_num;
10488         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10489         for (i = 0; i < separator_num; i++)
10490                 *reg++ = SEPARATOR_VALUE;
10491
10492         ret = hclge_get_dfx_reg(hdev, reg);
10493         if (ret)
10494                 dev_err(&hdev->pdev->dev,
10495                         "Get dfx register failed, ret = %d.\n", ret);
10496 }
10497
10498 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10499 {
10500         struct hclge_set_led_state_cmd *req;
10501         struct hclge_desc desc;
10502         int ret;
10503
10504         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10505
10506         req = (struct hclge_set_led_state_cmd *)desc.data;
10507         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10508                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10509
10510         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10511         if (ret)
10512                 dev_err(&hdev->pdev->dev,
10513                         "Send set led state cmd error, ret = %d\n", ret);
10514
10515         return ret;
10516 }
10517
10518 enum hclge_led_status {
10519         HCLGE_LED_OFF,
10520         HCLGE_LED_ON,
10521         HCLGE_LED_NO_CHANGE = 0xFF,
10522 };
10523
10524 static int hclge_set_led_id(struct hnae3_handle *handle,
10525                             enum ethtool_phys_id_state status)
10526 {
10527         struct hclge_vport *vport = hclge_get_vport(handle);
10528         struct hclge_dev *hdev = vport->back;
10529
10530         switch (status) {
10531         case ETHTOOL_ID_ACTIVE:
10532                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10533         case ETHTOOL_ID_INACTIVE:
10534                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10535         default:
10536                 return -EINVAL;
10537         }
10538 }
10539
10540 static void hclge_get_link_mode(struct hnae3_handle *handle,
10541                                 unsigned long *supported,
10542                                 unsigned long *advertising)
10543 {
10544         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10545         struct hclge_vport *vport = hclge_get_vport(handle);
10546         struct hclge_dev *hdev = vport->back;
10547         unsigned int idx = 0;
10548
10549         for (; idx < size; idx++) {
10550                 supported[idx] = hdev->hw.mac.supported[idx];
10551                 advertising[idx] = hdev->hw.mac.advertising[idx];
10552         }
10553 }
10554
10555 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10556 {
10557         struct hclge_vport *vport = hclge_get_vport(handle);
10558         struct hclge_dev *hdev = vport->back;
10559
10560         return hclge_config_gro(hdev, enable);
10561 }
10562
10563 static const struct hnae3_ae_ops hclge_ops = {
10564         .init_ae_dev = hclge_init_ae_dev,
10565         .uninit_ae_dev = hclge_uninit_ae_dev,
10566         .flr_prepare = hclge_flr_prepare,
10567         .flr_done = hclge_flr_done,
10568         .init_client_instance = hclge_init_client_instance,
10569         .uninit_client_instance = hclge_uninit_client_instance,
10570         .map_ring_to_vector = hclge_map_ring_to_vector,
10571         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10572         .get_vector = hclge_get_vector,
10573         .put_vector = hclge_put_vector,
10574         .set_promisc_mode = hclge_set_promisc_mode,
10575         .set_loopback = hclge_set_loopback,
10576         .start = hclge_ae_start,
10577         .stop = hclge_ae_stop,
10578         .client_start = hclge_client_start,
10579         .client_stop = hclge_client_stop,
10580         .get_status = hclge_get_status,
10581         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10582         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10583         .get_media_type = hclge_get_media_type,
10584         .check_port_speed = hclge_check_port_speed,
10585         .get_fec = hclge_get_fec,
10586         .set_fec = hclge_set_fec,
10587         .get_rss_key_size = hclge_get_rss_key_size,
10588         .get_rss_indir_size = hclge_get_rss_indir_size,
10589         .get_rss = hclge_get_rss,
10590         .set_rss = hclge_set_rss,
10591         .set_rss_tuple = hclge_set_rss_tuple,
10592         .get_rss_tuple = hclge_get_rss_tuple,
10593         .get_tc_size = hclge_get_tc_size,
10594         .get_mac_addr = hclge_get_mac_addr,
10595         .set_mac_addr = hclge_set_mac_addr,
10596         .do_ioctl = hclge_do_ioctl,
10597         .add_uc_addr = hclge_add_uc_addr,
10598         .rm_uc_addr = hclge_rm_uc_addr,
10599         .add_mc_addr = hclge_add_mc_addr,
10600         .rm_mc_addr = hclge_rm_mc_addr,
10601         .set_autoneg = hclge_set_autoneg,
10602         .get_autoneg = hclge_get_autoneg,
10603         .restart_autoneg = hclge_restart_autoneg,
10604         .halt_autoneg = hclge_halt_autoneg,
10605         .get_pauseparam = hclge_get_pauseparam,
10606         .set_pauseparam = hclge_set_pauseparam,
10607         .set_mtu = hclge_set_mtu,
10608         .reset_queue = hclge_reset_tqp,
10609         .get_stats = hclge_get_stats,
10610         .get_mac_stats = hclge_get_mac_stat,
10611         .update_stats = hclge_update_stats,
10612         .get_strings = hclge_get_strings,
10613         .get_sset_count = hclge_get_sset_count,
10614         .get_fw_version = hclge_get_fw_version,
10615         .get_mdix_mode = hclge_get_mdix_mode,
10616         .enable_vlan_filter = hclge_enable_vlan_filter,
10617         .set_vlan_filter = hclge_set_vlan_filter,
10618         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10619         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10620         .reset_event = hclge_reset_event,
10621         .get_reset_level = hclge_get_reset_level,
10622         .set_default_reset_request = hclge_set_def_reset_request,
10623         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10624         .set_channels = hclge_set_channels,
10625         .get_channels = hclge_get_channels,
10626         .get_regs_len = hclge_get_regs_len,
10627         .get_regs = hclge_get_regs,
10628         .set_led_id = hclge_set_led_id,
10629         .get_link_mode = hclge_get_link_mode,
10630         .add_fd_entry = hclge_add_fd_entry,
10631         .del_fd_entry = hclge_del_fd_entry,
10632         .del_all_fd_entries = hclge_del_all_fd_entries,
10633         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10634         .get_fd_rule_info = hclge_get_fd_rule_info,
10635         .get_fd_all_rules = hclge_get_all_rules,
10636         .restore_fd_rules = hclge_restore_fd_entries,
10637         .enable_fd = hclge_enable_fd,
10638         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10639         .dbg_run_cmd = hclge_dbg_run_cmd,
10640         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10641         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10642         .ae_dev_resetting = hclge_ae_dev_resetting,
10643         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10644         .set_gro_en = hclge_gro_en,
10645         .get_global_queue_id = hclge_covert_handle_qid_global,
10646         .set_timer_task = hclge_set_timer_task,
10647         .mac_connect_phy = hclge_mac_connect_phy,
10648         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10649         .restore_vlan_table = hclge_restore_vlan_table,
10650         .get_vf_config = hclge_get_vf_config,
10651         .set_vf_link_state = hclge_set_vf_link_state,
10652         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10653         .set_vf_trust = hclge_set_vf_trust,
10654         .set_vf_rate = hclge_set_vf_rate,
10655         .set_vf_mac = hclge_set_vf_mac,
10656 };
10657
10658 static struct hnae3_ae_algo ae_algo = {
10659         .ops = &hclge_ops,
10660         .pdev_id_table = ae_algo_pci_tbl,
10661 };
10662
10663 static int hclge_init(void)
10664 {
10665         pr_info("%s is initializing\n", HCLGE_NAME);
10666
10667         hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10668         if (!hclge_wq) {
10669                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10670                 return -ENOMEM;
10671         }
10672
10673         hnae3_register_ae_algo(&ae_algo);
10674
10675         return 0;
10676 }
10677
10678 static void hclge_exit(void)
10679 {
10680         hnae3_unregister_ae_algo(&ae_algo);
10681         destroy_workqueue(hclge_wq);
10682 }
10683 module_init(hclge_init);
10684 module_exit(hclge_exit);
10685
10686 MODULE_LICENSE("GPL");
10687 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10688 MODULE_DESCRIPTION("HCLGE Driver");
10689 MODULE_VERSION(HCLGE_MOD_VERSION);