d81c9760872660b8c4743c6ddcd2086cb14bad21
[linux-2.6-microblaze.git] / drivers / net / ethernet / hisilicon / hns3 / hns3pf / hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
75 static struct workqueue_struct *hclge_wq;
76
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85         /* required last entry */
86         {0, }
87 };
88
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92                                          HCLGE_CMDQ_TX_ADDR_H_REG,
93                                          HCLGE_CMDQ_TX_DEPTH_REG,
94                                          HCLGE_CMDQ_TX_TAIL_REG,
95                                          HCLGE_CMDQ_TX_HEAD_REG,
96                                          HCLGE_CMDQ_RX_ADDR_L_REG,
97                                          HCLGE_CMDQ_RX_ADDR_H_REG,
98                                          HCLGE_CMDQ_RX_DEPTH_REG,
99                                          HCLGE_CMDQ_RX_TAIL_REG,
100                                          HCLGE_CMDQ_RX_HEAD_REG,
101                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
102                                          HCLGE_CMDQ_INTR_STS_REG,
103                                          HCLGE_CMDQ_INTR_EN_REG,
104                                          HCLGE_CMDQ_INTR_GEN_REG};
105
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107                                            HCLGE_VECTOR0_OTER_EN_REG,
108                                            HCLGE_MISC_RESET_STS_REG,
109                                            HCLGE_MISC_VECTOR_INT_STS,
110                                            HCLGE_GLOBAL_RESET_REG,
111                                            HCLGE_FUN_RST_ING,
112                                            HCLGE_GRO_EN_REG};
113
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115                                          HCLGE_RING_RX_ADDR_H_REG,
116                                          HCLGE_RING_RX_BD_NUM_REG,
117                                          HCLGE_RING_RX_BD_LENGTH_REG,
118                                          HCLGE_RING_RX_MERGE_EN_REG,
119                                          HCLGE_RING_RX_TAIL_REG,
120                                          HCLGE_RING_RX_HEAD_REG,
121                                          HCLGE_RING_RX_FBD_NUM_REG,
122                                          HCLGE_RING_RX_OFFSET_REG,
123                                          HCLGE_RING_RX_FBD_OFFSET_REG,
124                                          HCLGE_RING_RX_STASH_REG,
125                                          HCLGE_RING_RX_BD_ERR_REG,
126                                          HCLGE_RING_TX_ADDR_L_REG,
127                                          HCLGE_RING_TX_ADDR_H_REG,
128                                          HCLGE_RING_TX_BD_NUM_REG,
129                                          HCLGE_RING_TX_PRIORITY_REG,
130                                          HCLGE_RING_TX_TC_REG,
131                                          HCLGE_RING_TX_MERGE_EN_REG,
132                                          HCLGE_RING_TX_TAIL_REG,
133                                          HCLGE_RING_TX_HEAD_REG,
134                                          HCLGE_RING_TX_FBD_NUM_REG,
135                                          HCLGE_RING_TX_OFFSET_REG,
136                                          HCLGE_RING_TX_EBD_NUM_REG,
137                                          HCLGE_RING_TX_EBD_OFFSET_REG,
138                                          HCLGE_RING_TX_BD_ERR_REG,
139                                          HCLGE_RING_EN_REG};
140
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142                                              HCLGE_TQP_INTR_GL0_REG,
143                                              HCLGE_TQP_INTR_GL1_REG,
144                                              HCLGE_TQP_INTR_GL2_REG,
145                                              HCLGE_TQP_INTR_RL_REG};
146
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148         "App    Loopback test",
149         "Serdes serial Loopback test",
150         "Serdes parallel Loopback test",
151         "Phy    Loopback test"
152 };
153
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155         {"mac_tx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157         {"mac_rx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159         {"mac_tx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161         {"mac_rx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163         {"mac_tx_pfc_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165         {"mac_tx_pfc_pri0_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167         {"mac_tx_pfc_pri1_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169         {"mac_tx_pfc_pri2_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171         {"mac_tx_pfc_pri3_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173         {"mac_tx_pfc_pri4_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175         {"mac_tx_pfc_pri5_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177         {"mac_tx_pfc_pri6_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179         {"mac_tx_pfc_pri7_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181         {"mac_rx_pfc_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183         {"mac_rx_pfc_pri0_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185         {"mac_rx_pfc_pri1_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187         {"mac_rx_pfc_pri2_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189         {"mac_rx_pfc_pri3_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191         {"mac_rx_pfc_pri4_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193         {"mac_rx_pfc_pri5_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195         {"mac_rx_pfc_pri6_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197         {"mac_rx_pfc_pri7_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199         {"mac_tx_total_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201         {"mac_tx_total_oct_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203         {"mac_tx_good_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205         {"mac_tx_bad_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207         {"mac_tx_good_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209         {"mac_tx_bad_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211         {"mac_tx_uni_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213         {"mac_tx_multi_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215         {"mac_tx_broad_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217         {"mac_tx_undersize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219         {"mac_tx_oversize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221         {"mac_tx_64_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223         {"mac_tx_65_127_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225         {"mac_tx_128_255_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227         {"mac_tx_256_511_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229         {"mac_tx_512_1023_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231         {"mac_tx_1024_1518_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233         {"mac_tx_1519_2047_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235         {"mac_tx_2048_4095_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237         {"mac_tx_4096_8191_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239         {"mac_tx_8192_9216_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241         {"mac_tx_9217_12287_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243         {"mac_tx_12288_16383_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245         {"mac_tx_1519_max_good_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247         {"mac_tx_1519_max_bad_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249         {"mac_rx_total_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251         {"mac_rx_total_oct_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253         {"mac_rx_good_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255         {"mac_rx_bad_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257         {"mac_rx_good_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259         {"mac_rx_bad_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261         {"mac_rx_uni_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263         {"mac_rx_multi_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265         {"mac_rx_broad_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267         {"mac_rx_undersize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269         {"mac_rx_oversize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271         {"mac_rx_64_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273         {"mac_rx_65_127_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275         {"mac_rx_128_255_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277         {"mac_rx_256_511_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279         {"mac_rx_512_1023_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281         {"mac_rx_1024_1518_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283         {"mac_rx_1519_2047_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285         {"mac_rx_2048_4095_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287         {"mac_rx_4096_8191_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289         {"mac_rx_8192_9216_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291         {"mac_rx_9217_12287_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293         {"mac_rx_12288_16383_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295         {"mac_rx_1519_max_good_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297         {"mac_rx_1519_max_bad_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300         {"mac_tx_fragment_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302         {"mac_tx_undermin_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304         {"mac_tx_jabber_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306         {"mac_tx_err_all_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308         {"mac_tx_from_app_good_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310         {"mac_tx_from_app_bad_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312         {"mac_rx_fragment_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314         {"mac_rx_undermin_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316         {"mac_rx_jabber_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318         {"mac_rx_fcs_err_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320         {"mac_rx_send_app_good_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322         {"mac_rx_send_app_bad_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327         {
328                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331                 .i_port_bitmap = 0x1,
332         },
333 };
334
335 static const u8 hclge_hash_key[] = {
336         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342
343 static const u32 hclge_dfx_bd_offset_list[] = {
344         HCLGE_DFX_BIOS_BD_OFFSET,
345         HCLGE_DFX_SSU_0_BD_OFFSET,
346         HCLGE_DFX_SSU_1_BD_OFFSET,
347         HCLGE_DFX_IGU_BD_OFFSET,
348         HCLGE_DFX_RPU_0_BD_OFFSET,
349         HCLGE_DFX_RPU_1_BD_OFFSET,
350         HCLGE_DFX_NCSI_BD_OFFSET,
351         HCLGE_DFX_RTC_BD_OFFSET,
352         HCLGE_DFX_PPP_BD_OFFSET,
353         HCLGE_DFX_RCB_BD_OFFSET,
354         HCLGE_DFX_TQP_BD_OFFSET,
355         HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359         HCLGE_OPC_DFX_BIOS_COMMON_REG,
360         HCLGE_OPC_DFX_SSU_REG_0,
361         HCLGE_OPC_DFX_SSU_REG_1,
362         HCLGE_OPC_DFX_IGU_EGU_REG,
363         HCLGE_OPC_DFX_RPU_REG_0,
364         HCLGE_OPC_DFX_RPU_REG_1,
365         HCLGE_OPC_DFX_NCSI_REG,
366         HCLGE_OPC_DFX_RTC_REG,
367         HCLGE_OPC_DFX_PPP_REG,
368         HCLGE_OPC_DFX_RCB_REG,
369         HCLGE_OPC_DFX_TQP_REG,
370         HCLGE_OPC_DFX_SSU_REG_2
371 };
372
373 static const struct key_info meta_data_key_info[] = {
374         { PACKET_TYPE_ID, 6},
375         { IP_FRAGEMENT, 1},
376         { ROCE_TYPE, 1},
377         { NEXT_KEY, 5},
378         { VLAN_NUMBER, 2},
379         { SRC_VPORT, 12},
380         { DST_VPORT, 12},
381         { TUNNEL_PACKET, 1},
382 };
383
384 static const struct key_info tuple_key_info[] = {
385         { OUTER_DST_MAC, 48},
386         { OUTER_SRC_MAC, 48},
387         { OUTER_VLAN_TAG_FST, 16},
388         { OUTER_VLAN_TAG_SEC, 16},
389         { OUTER_ETH_TYPE, 16},
390         { OUTER_L2_RSV, 16},
391         { OUTER_IP_TOS, 8},
392         { OUTER_IP_PROTO, 8},
393         { OUTER_SRC_IP, 32},
394         { OUTER_DST_IP, 32},
395         { OUTER_L3_RSV, 16},
396         { OUTER_SRC_PORT, 16},
397         { OUTER_DST_PORT, 16},
398         { OUTER_L4_RSV, 32},
399         { OUTER_TUN_VNI, 24},
400         { OUTER_TUN_FLOW_ID, 8},
401         { INNER_DST_MAC, 48},
402         { INNER_SRC_MAC, 48},
403         { INNER_VLAN_TAG_FST, 16},
404         { INNER_VLAN_TAG_SEC, 16},
405         { INNER_ETH_TYPE, 16},
406         { INNER_L2_RSV, 16},
407         { INNER_IP_TOS, 8},
408         { INNER_IP_PROTO, 8},
409         { INNER_SRC_IP, 32},
410         { INNER_DST_IP, 32},
411         { INNER_L3_RSV, 16},
412         { INNER_SRC_PORT, 16},
413         { INNER_DST_PORT, 16},
414         { INNER_L4_RSV, 32},
415 };
416
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420
421         u64 *data = (u64 *)(&hdev->mac_stats);
422         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423         __le64 *desc_data;
424         int i, k, n;
425         int ret;
426
427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429         if (ret) {
430                 dev_err(&hdev->pdev->dev,
431                         "Get MAC pkt stats fail, status = %d.\n", ret);
432
433                 return ret;
434         }
435
436         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437                 /* for special opcode 0032, only the first desc has the head */
438                 if (unlikely(i == 0)) {
439                         desc_data = (__le64 *)(&desc[i].data[0]);
440                         n = HCLGE_RD_FIRST_STATS_NUM;
441                 } else {
442                         desc_data = (__le64 *)(&desc[i]);
443                         n = HCLGE_RD_OTHER_STATS_NUM;
444                 }
445
446                 for (k = 0; k < n; k++) {
447                         *data += le64_to_cpu(*desc_data);
448                         data++;
449                         desc_data++;
450                 }
451         }
452
453         return 0;
454 }
455
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458         u64 *data = (u64 *)(&hdev->mac_stats);
459         struct hclge_desc *desc;
460         __le64 *desc_data;
461         u16 i, k, n;
462         int ret;
463
464         /* This may be called inside atomic sections,
465          * so GFP_ATOMIC is more suitalbe here
466          */
467         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468         if (!desc)
469                 return -ENOMEM;
470
471         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473         if (ret) {
474                 kfree(desc);
475                 return ret;
476         }
477
478         for (i = 0; i < desc_num; i++) {
479                 /* for special opcode 0034, only the first desc has the head */
480                 if (i == 0) {
481                         desc_data = (__le64 *)(&desc[i].data[0]);
482                         n = HCLGE_RD_FIRST_STATS_NUM;
483                 } else {
484                         desc_data = (__le64 *)(&desc[i]);
485                         n = HCLGE_RD_OTHER_STATS_NUM;
486                 }
487
488                 for (k = 0; k < n; k++) {
489                         *data += le64_to_cpu(*desc_data);
490                         data++;
491                         desc_data++;
492                 }
493         }
494
495         kfree(desc);
496
497         return 0;
498 }
499
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502         struct hclge_desc desc;
503         __le32 *desc_data;
504         u32 reg_num;
505         int ret;
506
507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509         if (ret)
510                 return ret;
511
512         desc_data = (__le32 *)(&desc.data[0]);
513         reg_num = le32_to_cpu(*desc_data);
514
515         *desc_num = 1 + ((reg_num - 3) >> 2) +
516                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518         return 0;
519 }
520
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523         u32 desc_num;
524         int ret;
525
526         ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528         /* The firmware supports the new statistics acquisition method */
529         if (!ret)
530                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531         else if (ret == -EOPNOTSUPP)
532                 ret = hclge_mac_update_stats_defective(hdev);
533         else
534                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536         return ret;
537 }
538
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542         struct hclge_vport *vport = hclge_get_vport(handle);
543         struct hclge_dev *hdev = vport->back;
544         struct hnae3_queue *queue;
545         struct hclge_desc desc[1];
546         struct hclge_tqp *tqp;
547         int ret, i;
548
549         for (i = 0; i < kinfo->num_tqps; i++) {
550                 queue = handle->kinfo.tqp[i];
551                 tqp = container_of(queue, struct hclge_tqp, q);
552                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
553                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554                                            true);
555
556                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558                 if (ret) {
559                         dev_err(&hdev->pdev->dev,
560                                 "Query tqp stat fail, status = %d,queue = %d\n",
561                                 ret, i);
562                         return ret;
563                 }
564                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565                         le32_to_cpu(desc[0].data[1]);
566         }
567
568         for (i = 0; i < kinfo->num_tqps; i++) {
569                 queue = handle->kinfo.tqp[i];
570                 tqp = container_of(queue, struct hclge_tqp, q);
571                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
572                 hclge_cmd_setup_basic_desc(&desc[0],
573                                            HCLGE_OPC_QUERY_TX_STATUS,
574                                            true);
575
576                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578                 if (ret) {
579                         dev_err(&hdev->pdev->dev,
580                                 "Query tqp stat fail, status = %d,queue = %d\n",
581                                 ret, i);
582                         return ret;
583                 }
584                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585                         le32_to_cpu(desc[0].data[1]);
586         }
587
588         return 0;
589 }
590
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594         struct hclge_tqp *tqp;
595         u64 *buff = data;
596         int i;
597
598         for (i = 0; i < kinfo->num_tqps; i++) {
599                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601         }
602
603         for (i = 0; i < kinfo->num_tqps; i++) {
604                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606         }
607
608         return buff;
609 }
610
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615         /* each tqp has TX & RX two queues */
616         return kinfo->num_tqps * (2);
617 }
618
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         u8 *buff = data;
623         int i = 0;
624
625         for (i = 0; i < kinfo->num_tqps; i++) {
626                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627                         struct hclge_tqp, q);
628                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629                          tqp->index);
630                 buff = buff + ETH_GSTRING_LEN;
631         }
632
633         for (i = 0; i < kinfo->num_tqps; i++) {
634                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635                         struct hclge_tqp, q);
636                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637                          tqp->index);
638                 buff = buff + ETH_GSTRING_LEN;
639         }
640
641         return buff;
642 }
643
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645                                  const struct hclge_comm_stats_str strs[],
646                                  int size, u64 *data)
647 {
648         u64 *buf = data;
649         u32 i;
650
651         for (i = 0; i < size; i++)
652                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654         return buf + size;
655 }
656
657 static u8 *hclge_comm_get_strings(u32 stringset,
658                                   const struct hclge_comm_stats_str strs[],
659                                   int size, u8 *data)
660 {
661         char *buff = (char *)data;
662         u32 i;
663
664         if (stringset != ETH_SS_STATS)
665                 return buff;
666
667         for (i = 0; i < size; i++) {
668                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669                 buff = buff + ETH_GSTRING_LEN;
670         }
671
672         return (u8 *)buff;
673 }
674
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677         struct hnae3_handle *handle;
678         int status;
679
680         handle = &hdev->vport[0].nic;
681         if (handle->client) {
682                 status = hclge_tqps_update_stats(handle);
683                 if (status) {
684                         dev_err(&hdev->pdev->dev,
685                                 "Update TQPS stats fail, status = %d.\n",
686                                 status);
687                 }
688         }
689
690         status = hclge_mac_update_stats(hdev);
691         if (status)
692                 dev_err(&hdev->pdev->dev,
693                         "Update MAC stats fail, status = %d.\n", status);
694 }
695
696 static void hclge_update_stats(struct hnae3_handle *handle,
697                                struct net_device_stats *net_stats)
698 {
699         struct hclge_vport *vport = hclge_get_vport(handle);
700         struct hclge_dev *hdev = vport->back;
701         int status;
702
703         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704                 return;
705
706         status = hclge_mac_update_stats(hdev);
707         if (status)
708                 dev_err(&hdev->pdev->dev,
709                         "Update MAC stats fail, status = %d.\n",
710                         status);
711
712         status = hclge_tqps_update_stats(handle);
713         if (status)
714                 dev_err(&hdev->pdev->dev,
715                         "Update TQPS stats fail, status = %d.\n",
716                         status);
717
718         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724                 HNAE3_SUPPORT_PHY_LOOPBACK |\
725                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int count = 0;
731
732         /* Loopback test support rules:
733          * mac: only GE mode support
734          * serdes: all mac mode will support include GE/XGE/LGE/CGE
735          * phy: only support when phy device exist on board
736          */
737         if (stringset == ETH_SS_TEST) {
738                 /* clear loopback bit flags at first */
739                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740                 if (hdev->pdev->revision >= 0x21 ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744                         count += 1;
745                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746                 }
747
748                 count += 2;
749                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752                 if (hdev->hw.mac.phydev) {
753                         count += 1;
754                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755                 }
756
757         } else if (stringset == ETH_SS_STATS) {
758                 count = ARRAY_SIZE(g_mac_stats_string) +
759                         hclge_tqps_get_sset_count(handle, stringset);
760         }
761
762         return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766                               u8 *data)
767 {
768         u8 *p = (char *)data;
769         int size;
770
771         if (stringset == ETH_SS_STATS) {
772                 size = ARRAY_SIZE(g_mac_stats_string);
773                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774                                            size, p);
775                 p = hclge_tqps_get_strings(handle, p);
776         } else if (stringset == ETH_SS_TEST) {
777                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779                                ETH_GSTRING_LEN);
780                         p += ETH_GSTRING_LEN;
781                 }
782                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784                                ETH_GSTRING_LEN);
785                         p += ETH_GSTRING_LEN;
786                 }
787                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788                         memcpy(p,
789                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790                                ETH_GSTRING_LEN);
791                         p += ETH_GSTRING_LEN;
792                 }
793                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795                                ETH_GSTRING_LEN);
796                         p += ETH_GSTRING_LEN;
797                 }
798         }
799 }
800
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803         struct hclge_vport *vport = hclge_get_vport(handle);
804         struct hclge_dev *hdev = vport->back;
805         u64 *p;
806
807         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808                                  ARRAY_SIZE(g_mac_stats_string), data);
809         p = hclge_tqps_get_stats(handle, p);
810 }
811
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813                                struct hns3_mac_stats *mac_stats)
814 {
815         struct hclge_vport *vport = hclge_get_vport(handle);
816         struct hclge_dev *hdev = vport->back;
817
818         hclge_update_stats(handle, NULL);
819
820         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825                                    struct hclge_func_status_cmd *status)
826 {
827         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828                 return -EINVAL;
829
830         /* Set the pf to main pf */
831         if (status->pf_state & HCLGE_PF_STATE_MAIN)
832                 hdev->flag |= HCLGE_FLAG_MAIN;
833         else
834                 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836         return 0;
837 }
838
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT     5
842
843         struct hclge_func_status_cmd *req;
844         struct hclge_desc desc;
845         int timeout = 0;
846         int ret;
847
848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849         req = (struct hclge_func_status_cmd *)desc.data;
850
851         do {
852                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853                 if (ret) {
854                         dev_err(&hdev->pdev->dev,
855                                 "query function status failed %d.\n", ret);
856                         return ret;
857                 }
858
859                 /* Check pf reset is done */
860                 if (req->pf_state)
861                         break;
862                 usleep_range(1000, 2000);
863         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865         ret = hclge_parse_func_status(hdev, req);
866
867         return ret;
868 }
869
870 static int hclge_query_pf_resource(struct hclge_dev *hdev)
871 {
872         struct hclge_pf_res_cmd *req;
873         struct hclge_desc desc;
874         int ret;
875
876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
877         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
878         if (ret) {
879                 dev_err(&hdev->pdev->dev,
880                         "query pf resource failed %d.\n", ret);
881                 return ret;
882         }
883
884         req = (struct hclge_pf_res_cmd *)desc.data;
885         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
886         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
887
888         if (req->tx_buf_size)
889                 hdev->tx_buf_size =
890                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
891         else
892                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
893
894         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
895
896         if (req->dv_buf_size)
897                 hdev->dv_buf_size =
898                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
899         else
900                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
901
902         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
903
904         if (hnae3_dev_roce_supported(hdev)) {
905                 hdev->roce_base_msix_offset =
906                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
907                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
908                 hdev->num_roce_msi =
909                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
910                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
911
912                 /* nic's msix numbers is always equals to the roce's. */
913                 hdev->num_nic_msi = hdev->num_roce_msi;
914
915                 /* PF should have NIC vectors and Roce vectors,
916                  * NIC vectors are queued before Roce vectors.
917                  */
918                 hdev->num_msi = hdev->num_roce_msi +
919                                 hdev->roce_base_msix_offset;
920         } else {
921                 hdev->num_msi =
922                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
923                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
924
925                 hdev->num_nic_msi = hdev->num_msi;
926         }
927
928         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
929                 dev_err(&hdev->pdev->dev,
930                         "Just %u msi resources, not enough for pf(min:2).\n",
931                         hdev->num_nic_msi);
932                 return -EINVAL;
933         }
934
935         return 0;
936 }
937
938 static int hclge_parse_speed(int speed_cmd, int *speed)
939 {
940         switch (speed_cmd) {
941         case 6:
942                 *speed = HCLGE_MAC_SPEED_10M;
943                 break;
944         case 7:
945                 *speed = HCLGE_MAC_SPEED_100M;
946                 break;
947         case 0:
948                 *speed = HCLGE_MAC_SPEED_1G;
949                 break;
950         case 1:
951                 *speed = HCLGE_MAC_SPEED_10G;
952                 break;
953         case 2:
954                 *speed = HCLGE_MAC_SPEED_25G;
955                 break;
956         case 3:
957                 *speed = HCLGE_MAC_SPEED_40G;
958                 break;
959         case 4:
960                 *speed = HCLGE_MAC_SPEED_50G;
961                 break;
962         case 5:
963                 *speed = HCLGE_MAC_SPEED_100G;
964                 break;
965         default:
966                 return -EINVAL;
967         }
968
969         return 0;
970 }
971
972 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
973 {
974         struct hclge_vport *vport = hclge_get_vport(handle);
975         struct hclge_dev *hdev = vport->back;
976         u32 speed_ability = hdev->hw.mac.speed_ability;
977         u32 speed_bit = 0;
978
979         switch (speed) {
980         case HCLGE_MAC_SPEED_10M:
981                 speed_bit = HCLGE_SUPPORT_10M_BIT;
982                 break;
983         case HCLGE_MAC_SPEED_100M:
984                 speed_bit = HCLGE_SUPPORT_100M_BIT;
985                 break;
986         case HCLGE_MAC_SPEED_1G:
987                 speed_bit = HCLGE_SUPPORT_1G_BIT;
988                 break;
989         case HCLGE_MAC_SPEED_10G:
990                 speed_bit = HCLGE_SUPPORT_10G_BIT;
991                 break;
992         case HCLGE_MAC_SPEED_25G:
993                 speed_bit = HCLGE_SUPPORT_25G_BIT;
994                 break;
995         case HCLGE_MAC_SPEED_40G:
996                 speed_bit = HCLGE_SUPPORT_40G_BIT;
997                 break;
998         case HCLGE_MAC_SPEED_50G:
999                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1000                 break;
1001         case HCLGE_MAC_SPEED_100G:
1002                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1003                 break;
1004         default:
1005                 return -EINVAL;
1006         }
1007
1008         if (speed_bit & speed_ability)
1009                 return 0;
1010
1011         return -EINVAL;
1012 }
1013
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1015 {
1016         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018                                  mac->supported);
1019         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021                                  mac->supported);
1022         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024                                  mac->supported);
1025         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027                                  mac->supported);
1028         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030                                  mac->supported);
1031 }
1032
1033 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1034 {
1035         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1036                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1037                                  mac->supported);
1038         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1043                                  mac->supported);
1044         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1046                                  mac->supported);
1047         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1048                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1049                                  mac->supported);
1050 }
1051
1052 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1053 {
1054         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1059                                  mac->supported);
1060         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1061                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1062                                  mac->supported);
1063         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1064                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1065                                  mac->supported);
1066         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1067                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1068                                  mac->supported);
1069 }
1070
1071 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1072 {
1073         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1080                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1081                                  mac->supported);
1082         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1083                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1084                                  mac->supported);
1085         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1086                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1087                                  mac->supported);
1088         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1089                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1090                                  mac->supported);
1091 }
1092
1093 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1094 {
1095         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1096         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1097
1098         switch (mac->speed) {
1099         case HCLGE_MAC_SPEED_10G:
1100         case HCLGE_MAC_SPEED_40G:
1101                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1102                                  mac->supported);
1103                 mac->fec_ability =
1104                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1105                 break;
1106         case HCLGE_MAC_SPEED_25G:
1107         case HCLGE_MAC_SPEED_50G:
1108                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1109                                  mac->supported);
1110                 mac->fec_ability =
1111                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1112                         BIT(HNAE3_FEC_AUTO);
1113                 break;
1114         case HCLGE_MAC_SPEED_100G:
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1116                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1117                 break;
1118         default:
1119                 mac->fec_ability = 0;
1120                 break;
1121         }
1122 }
1123
1124 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1125                                         u8 speed_ability)
1126 {
1127         struct hclge_mac *mac = &hdev->hw.mac;
1128
1129         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1130                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1131                                  mac->supported);
1132
1133         hclge_convert_setting_sr(mac, speed_ability);
1134         hclge_convert_setting_lr(mac, speed_ability);
1135         hclge_convert_setting_cr(mac, speed_ability);
1136         if (hdev->pdev->revision >= 0x21)
1137                 hclge_convert_setting_fec(mac);
1138
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1140         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1141         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1142 }
1143
1144 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1145                                             u8 speed_ability)
1146 {
1147         struct hclge_mac *mac = &hdev->hw.mac;
1148
1149         hclge_convert_setting_kr(mac, speed_ability);
1150         if (hdev->pdev->revision >= 0x21)
1151                 hclge_convert_setting_fec(mac);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1153         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1154         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1155 }
1156
1157 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1158                                          u8 speed_ability)
1159 {
1160         unsigned long *supported = hdev->hw.mac.supported;
1161
1162         /* default to support all speed for GE port */
1163         if (!speed_ability)
1164                 speed_ability = HCLGE_SUPPORT_GE;
1165
1166         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1167                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1168                                  supported);
1169
1170         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1172                                  supported);
1173                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1174                                  supported);
1175         }
1176
1177         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1178                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1179                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1180         }
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1184         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1185         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1186 }
1187
1188 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1189 {
1190         u8 media_type = hdev->hw.mac.media_type;
1191
1192         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1193                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1195                 hclge_parse_copper_link_mode(hdev, speed_ability);
1196         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1197                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1198 }
1199
1200 static u32 hclge_get_max_speed(u8 speed_ability)
1201 {
1202         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1203                 return HCLGE_MAC_SPEED_100G;
1204
1205         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1206                 return HCLGE_MAC_SPEED_50G;
1207
1208         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1209                 return HCLGE_MAC_SPEED_40G;
1210
1211         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1212                 return HCLGE_MAC_SPEED_25G;
1213
1214         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1215                 return HCLGE_MAC_SPEED_10G;
1216
1217         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1218                 return HCLGE_MAC_SPEED_1G;
1219
1220         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1221                 return HCLGE_MAC_SPEED_100M;
1222
1223         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1224                 return HCLGE_MAC_SPEED_10M;
1225
1226         return HCLGE_MAC_SPEED_1G;
1227 }
1228
1229 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1230 {
1231         struct hclge_cfg_param_cmd *req;
1232         u64 mac_addr_tmp_high;
1233         u64 mac_addr_tmp;
1234         unsigned int i;
1235
1236         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1237
1238         /* get the configuration */
1239         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1240                                               HCLGE_CFG_VMDQ_M,
1241                                               HCLGE_CFG_VMDQ_S);
1242         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1244         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1245                                             HCLGE_CFG_TQP_DESC_N_M,
1246                                             HCLGE_CFG_TQP_DESC_N_S);
1247
1248         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1249                                         HCLGE_CFG_PHY_ADDR_M,
1250                                         HCLGE_CFG_PHY_ADDR_S);
1251         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252                                           HCLGE_CFG_MEDIA_TP_M,
1253                                           HCLGE_CFG_MEDIA_TP_S);
1254         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255                                           HCLGE_CFG_RX_BUF_LEN_M,
1256                                           HCLGE_CFG_RX_BUF_LEN_S);
1257         /* get mac_address */
1258         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1259         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1260                                             HCLGE_CFG_MAC_ADDR_H_M,
1261                                             HCLGE_CFG_MAC_ADDR_H_S);
1262
1263         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1264
1265         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1266                                              HCLGE_CFG_DEFAULT_SPEED_M,
1267                                              HCLGE_CFG_DEFAULT_SPEED_S);
1268         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269                                             HCLGE_CFG_RSS_SIZE_M,
1270                                             HCLGE_CFG_RSS_SIZE_S);
1271
1272         for (i = 0; i < ETH_ALEN; i++)
1273                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1274
1275         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1276         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1277
1278         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279                                              HCLGE_CFG_SPEED_ABILITY_M,
1280                                              HCLGE_CFG_SPEED_ABILITY_S);
1281         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1283                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1284         if (!cfg->umv_space)
1285                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1286 }
1287
1288 /* hclge_get_cfg: query the static parameter from flash
1289  * @hdev: pointer to struct hclge_dev
1290  * @hcfg: the config structure to be getted
1291  */
1292 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1293 {
1294         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1295         struct hclge_cfg_param_cmd *req;
1296         unsigned int i;
1297         int ret;
1298
1299         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1300                 u32 offset = 0;
1301
1302                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1303                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1304                                            true);
1305                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1306                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1307                 /* Len should be united by 4 bytes when send to hardware */
1308                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1309                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1310                 req->offset = cpu_to_le32(offset);
1311         }
1312
1313         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1314         if (ret) {
1315                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1316                 return ret;
1317         }
1318
1319         hclge_parse_cfg(hcfg, desc);
1320
1321         return 0;
1322 }
1323
1324 static int hclge_get_cap(struct hclge_dev *hdev)
1325 {
1326         int ret;
1327
1328         ret = hclge_query_function_status(hdev);
1329         if (ret) {
1330                 dev_err(&hdev->pdev->dev,
1331                         "query function status error %d.\n", ret);
1332                 return ret;
1333         }
1334
1335         /* get pf resource */
1336         ret = hclge_query_pf_resource(hdev);
1337         if (ret)
1338                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1339
1340         return ret;
1341 }
1342
1343 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1344 {
1345 #define HCLGE_MIN_TX_DESC       64
1346 #define HCLGE_MIN_RX_DESC       64
1347
1348         if (!is_kdump_kernel())
1349                 return;
1350
1351         dev_info(&hdev->pdev->dev,
1352                  "Running kdump kernel. Using minimal resources\n");
1353
1354         /* minimal queue pairs equals to the number of vports */
1355         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1356         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1357         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1358 }
1359
1360 static int hclge_configure(struct hclge_dev *hdev)
1361 {
1362         struct hclge_cfg cfg;
1363         unsigned int i;
1364         int ret;
1365
1366         ret = hclge_get_cfg(hdev, &cfg);
1367         if (ret) {
1368                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1369                 return ret;
1370         }
1371
1372         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1373         hdev->base_tqp_pid = 0;
1374         hdev->rss_size_max = cfg.rss_size_max;
1375         hdev->rx_buf_len = cfg.rx_buf_len;
1376         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1377         hdev->hw.mac.media_type = cfg.media_type;
1378         hdev->hw.mac.phy_addr = cfg.phy_addr;
1379         hdev->num_tx_desc = cfg.tqp_desc_num;
1380         hdev->num_rx_desc = cfg.tqp_desc_num;
1381         hdev->tm_info.num_pg = 1;
1382         hdev->tc_max = cfg.tc_num;
1383         hdev->tm_info.hw_pfc_map = 0;
1384         hdev->wanted_umv_size = cfg.umv_space;
1385
1386         if (hnae3_dev_fd_supported(hdev)) {
1387                 hdev->fd_en = true;
1388                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1389         }
1390
1391         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1392         if (ret) {
1393                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1394                 return ret;
1395         }
1396
1397         hclge_parse_link_mode(hdev, cfg.speed_ability);
1398
1399         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1400
1401         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1402             (hdev->tc_max < 1)) {
1403                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1404                          hdev->tc_max);
1405                 hdev->tc_max = 1;
1406         }
1407
1408         /* Dev does not support DCB */
1409         if (!hnae3_dev_dcb_supported(hdev)) {
1410                 hdev->tc_max = 1;
1411                 hdev->pfc_max = 0;
1412         } else {
1413                 hdev->pfc_max = hdev->tc_max;
1414         }
1415
1416         hdev->tm_info.num_tc = 1;
1417
1418         /* Currently not support uncontiuous tc */
1419         for (i = 0; i < hdev->tm_info.num_tc; i++)
1420                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1421
1422         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1423
1424         hclge_init_kdump_kernel_config(hdev);
1425
1426         /* Set the init affinity based on pci func number */
1427         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1428         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1429         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1430                         &hdev->affinity_mask);
1431
1432         return ret;
1433 }
1434
1435 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1436                             unsigned int tso_mss_max)
1437 {
1438         struct hclge_cfg_tso_status_cmd *req;
1439         struct hclge_desc desc;
1440         u16 tso_mss;
1441
1442         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1443
1444         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1445
1446         tso_mss = 0;
1447         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1448                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1449         req->tso_mss_min = cpu_to_le16(tso_mss);
1450
1451         tso_mss = 0;
1452         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1453                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1454         req->tso_mss_max = cpu_to_le16(tso_mss);
1455
1456         return hclge_cmd_send(&hdev->hw, &desc, 1);
1457 }
1458
1459 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1460 {
1461         struct hclge_cfg_gro_status_cmd *req;
1462         struct hclge_desc desc;
1463         int ret;
1464
1465         if (!hnae3_dev_gro_supported(hdev))
1466                 return 0;
1467
1468         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1469         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1470
1471         req->gro_en = cpu_to_le16(en ? 1 : 0);
1472
1473         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1474         if (ret)
1475                 dev_err(&hdev->pdev->dev,
1476                         "GRO hardware config cmd failed, ret = %d\n", ret);
1477
1478         return ret;
1479 }
1480
1481 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1482 {
1483         struct hclge_tqp *tqp;
1484         int i;
1485
1486         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1487                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1488         if (!hdev->htqp)
1489                 return -ENOMEM;
1490
1491         tqp = hdev->htqp;
1492
1493         for (i = 0; i < hdev->num_tqps; i++) {
1494                 tqp->dev = &hdev->pdev->dev;
1495                 tqp->index = i;
1496
1497                 tqp->q.ae_algo = &ae_algo;
1498                 tqp->q.buf_size = hdev->rx_buf_len;
1499                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1500                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1501                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1502                         i * HCLGE_TQP_REG_SIZE;
1503
1504                 tqp++;
1505         }
1506
1507         return 0;
1508 }
1509
1510 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1511                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1512 {
1513         struct hclge_tqp_map_cmd *req;
1514         struct hclge_desc desc;
1515         int ret;
1516
1517         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1518
1519         req = (struct hclge_tqp_map_cmd *)desc.data;
1520         req->tqp_id = cpu_to_le16(tqp_pid);
1521         req->tqp_vf = func_id;
1522         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1523         if (!is_pf)
1524                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1525         req->tqp_vid = cpu_to_le16(tqp_vid);
1526
1527         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1528         if (ret)
1529                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1530
1531         return ret;
1532 }
1533
1534 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1535 {
1536         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1537         struct hclge_dev *hdev = vport->back;
1538         int i, alloced;
1539
1540         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1541              alloced < num_tqps; i++) {
1542                 if (!hdev->htqp[i].alloced) {
1543                         hdev->htqp[i].q.handle = &vport->nic;
1544                         hdev->htqp[i].q.tqp_index = alloced;
1545                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1546                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1547                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1548                         hdev->htqp[i].alloced = true;
1549                         alloced++;
1550                 }
1551         }
1552         vport->alloc_tqps = alloced;
1553         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1554                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1555
1556         /* ensure one to one mapping between irq and queue at default */
1557         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1558                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1559
1560         return 0;
1561 }
1562
1563 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1564                             u16 num_tx_desc, u16 num_rx_desc)
1565
1566 {
1567         struct hnae3_handle *nic = &vport->nic;
1568         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1569         struct hclge_dev *hdev = vport->back;
1570         int ret;
1571
1572         kinfo->num_tx_desc = num_tx_desc;
1573         kinfo->num_rx_desc = num_rx_desc;
1574
1575         kinfo->rx_buf_len = hdev->rx_buf_len;
1576
1577         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1578                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1579         if (!kinfo->tqp)
1580                 return -ENOMEM;
1581
1582         ret = hclge_assign_tqp(vport, num_tqps);
1583         if (ret)
1584                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1585
1586         return ret;
1587 }
1588
1589 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1590                                   struct hclge_vport *vport)
1591 {
1592         struct hnae3_handle *nic = &vport->nic;
1593         struct hnae3_knic_private_info *kinfo;
1594         u16 i;
1595
1596         kinfo = &nic->kinfo;
1597         for (i = 0; i < vport->alloc_tqps; i++) {
1598                 struct hclge_tqp *q =
1599                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1600                 bool is_pf;
1601                 int ret;
1602
1603                 is_pf = !(vport->vport_id);
1604                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1605                                              i, is_pf);
1606                 if (ret)
1607                         return ret;
1608         }
1609
1610         return 0;
1611 }
1612
1613 static int hclge_map_tqp(struct hclge_dev *hdev)
1614 {
1615         struct hclge_vport *vport = hdev->vport;
1616         u16 i, num_vport;
1617
1618         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1619         for (i = 0; i < num_vport; i++) {
1620                 int ret;
1621
1622                 ret = hclge_map_tqp_to_vport(hdev, vport);
1623                 if (ret)
1624                         return ret;
1625
1626                 vport++;
1627         }
1628
1629         return 0;
1630 }
1631
1632 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1633 {
1634         struct hnae3_handle *nic = &vport->nic;
1635         struct hclge_dev *hdev = vport->back;
1636         int ret;
1637
1638         nic->pdev = hdev->pdev;
1639         nic->ae_algo = &ae_algo;
1640         nic->numa_node_mask = hdev->numa_node_mask;
1641
1642         ret = hclge_knic_setup(vport, num_tqps,
1643                                hdev->num_tx_desc, hdev->num_rx_desc);
1644         if (ret)
1645                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1646
1647         return ret;
1648 }
1649
1650 static int hclge_alloc_vport(struct hclge_dev *hdev)
1651 {
1652         struct pci_dev *pdev = hdev->pdev;
1653         struct hclge_vport *vport;
1654         u32 tqp_main_vport;
1655         u32 tqp_per_vport;
1656         int num_vport, i;
1657         int ret;
1658
1659         /* We need to alloc a vport for main NIC of PF */
1660         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1661
1662         if (hdev->num_tqps < num_vport) {
1663                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1664                         hdev->num_tqps, num_vport);
1665                 return -EINVAL;
1666         }
1667
1668         /* Alloc the same number of TQPs for every vport */
1669         tqp_per_vport = hdev->num_tqps / num_vport;
1670         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1671
1672         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1673                              GFP_KERNEL);
1674         if (!vport)
1675                 return -ENOMEM;
1676
1677         hdev->vport = vport;
1678         hdev->num_alloc_vport = num_vport;
1679
1680         if (IS_ENABLED(CONFIG_PCI_IOV))
1681                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1682
1683         for (i = 0; i < num_vport; i++) {
1684                 vport->back = hdev;
1685                 vport->vport_id = i;
1686                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1687                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1688                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1689                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1690                 INIT_LIST_HEAD(&vport->vlan_list);
1691                 INIT_LIST_HEAD(&vport->uc_mac_list);
1692                 INIT_LIST_HEAD(&vport->mc_mac_list);
1693
1694                 if (i == 0)
1695                         ret = hclge_vport_setup(vport, tqp_main_vport);
1696                 else
1697                         ret = hclge_vport_setup(vport, tqp_per_vport);
1698                 if (ret) {
1699                         dev_err(&pdev->dev,
1700                                 "vport setup failed for vport %d, %d\n",
1701                                 i, ret);
1702                         return ret;
1703                 }
1704
1705                 vport++;
1706         }
1707
1708         return 0;
1709 }
1710
1711 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1712                                     struct hclge_pkt_buf_alloc *buf_alloc)
1713 {
1714 /* TX buffer size is unit by 128 byte */
1715 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1716 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1717         struct hclge_tx_buff_alloc_cmd *req;
1718         struct hclge_desc desc;
1719         int ret;
1720         u8 i;
1721
1722         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1723
1724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1725         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1727
1728                 req->tx_pkt_buff[i] =
1729                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1730                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1731         }
1732
1733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1734         if (ret)
1735                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1736                         ret);
1737
1738         return ret;
1739 }
1740
1741 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1742                                  struct hclge_pkt_buf_alloc *buf_alloc)
1743 {
1744         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1745
1746         if (ret)
1747                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1748
1749         return ret;
1750 }
1751
1752 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1753 {
1754         unsigned int i;
1755         u32 cnt = 0;
1756
1757         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1758                 if (hdev->hw_tc_map & BIT(i))
1759                         cnt++;
1760         return cnt;
1761 }
1762
1763 /* Get the number of pfc enabled TCs, which have private buffer */
1764 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1765                                   struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767         struct hclge_priv_buf *priv;
1768         unsigned int i;
1769         int cnt = 0;
1770
1771         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772                 priv = &buf_alloc->priv_buf[i];
1773                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1774                     priv->enable)
1775                         cnt++;
1776         }
1777
1778         return cnt;
1779 }
1780
1781 /* Get the number of pfc disabled TCs, which have private buffer */
1782 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1783                                      struct hclge_pkt_buf_alloc *buf_alloc)
1784 {
1785         struct hclge_priv_buf *priv;
1786         unsigned int i;
1787         int cnt = 0;
1788
1789         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1790                 priv = &buf_alloc->priv_buf[i];
1791                 if (hdev->hw_tc_map & BIT(i) &&
1792                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1793                     priv->enable)
1794                         cnt++;
1795         }
1796
1797         return cnt;
1798 }
1799
1800 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1801 {
1802         struct hclge_priv_buf *priv;
1803         u32 rx_priv = 0;
1804         int i;
1805
1806         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1807                 priv = &buf_alloc->priv_buf[i];
1808                 if (priv->enable)
1809                         rx_priv += priv->buf_size;
1810         }
1811         return rx_priv;
1812 }
1813
1814 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1815 {
1816         u32 i, total_tx_size = 0;
1817
1818         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1819                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1820
1821         return total_tx_size;
1822 }
1823
1824 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1825                                 struct hclge_pkt_buf_alloc *buf_alloc,
1826                                 u32 rx_all)
1827 {
1828         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1829         u32 tc_num = hclge_get_tc_num(hdev);
1830         u32 shared_buf, aligned_mps;
1831         u32 rx_priv;
1832         int i;
1833
1834         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1835
1836         if (hnae3_dev_dcb_supported(hdev))
1837                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1838                                         hdev->dv_buf_size;
1839         else
1840                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1841                                         + hdev->dv_buf_size;
1842
1843         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1844         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1845                              HCLGE_BUF_SIZE_UNIT);
1846
1847         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1848         if (rx_all < rx_priv + shared_std)
1849                 return false;
1850
1851         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1852         buf_alloc->s_buf.buf_size = shared_buf;
1853         if (hnae3_dev_dcb_supported(hdev)) {
1854                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1855                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1856                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1857                                   HCLGE_BUF_SIZE_UNIT);
1858         } else {
1859                 buf_alloc->s_buf.self.high = aligned_mps +
1860                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1861                 buf_alloc->s_buf.self.low = aligned_mps;
1862         }
1863
1864         if (hnae3_dev_dcb_supported(hdev)) {
1865                 hi_thrd = shared_buf - hdev->dv_buf_size;
1866
1867                 if (tc_num <= NEED_RESERVE_TC_NUM)
1868                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1869                                         / BUF_MAX_PERCENT;
1870
1871                 if (tc_num)
1872                         hi_thrd = hi_thrd / tc_num;
1873
1874                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1875                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1876                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1877         } else {
1878                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1879                 lo_thrd = aligned_mps;
1880         }
1881
1882         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1884                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1885         }
1886
1887         return true;
1888 }
1889
1890 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1891                                 struct hclge_pkt_buf_alloc *buf_alloc)
1892 {
1893         u32 i, total_size;
1894
1895         total_size = hdev->pkt_buf_size;
1896
1897         /* alloc tx buffer for all enabled tc */
1898         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1900
1901                 if (hdev->hw_tc_map & BIT(i)) {
1902                         if (total_size < hdev->tx_buf_size)
1903                                 return -ENOMEM;
1904
1905                         priv->tx_buf_size = hdev->tx_buf_size;
1906                 } else {
1907                         priv->tx_buf_size = 0;
1908                 }
1909
1910                 total_size -= priv->tx_buf_size;
1911         }
1912
1913         return 0;
1914 }
1915
1916 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1917                                   struct hclge_pkt_buf_alloc *buf_alloc)
1918 {
1919         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1920         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1921         unsigned int i;
1922
1923         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1924                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1925
1926                 priv->enable = 0;
1927                 priv->wl.low = 0;
1928                 priv->wl.high = 0;
1929                 priv->buf_size = 0;
1930
1931                 if (!(hdev->hw_tc_map & BIT(i)))
1932                         continue;
1933
1934                 priv->enable = 1;
1935
1936                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1937                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1938                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1939                                                 HCLGE_BUF_SIZE_UNIT);
1940                 } else {
1941                         priv->wl.low = 0;
1942                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1943                                         aligned_mps;
1944                 }
1945
1946                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1947         }
1948
1949         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1950 }
1951
1952 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1953                                           struct hclge_pkt_buf_alloc *buf_alloc)
1954 {
1955         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1956         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1957         int i;
1958
1959         /* let the last to be cleared first */
1960         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1961                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1962                 unsigned int mask = BIT((unsigned int)i);
1963
1964                 if (hdev->hw_tc_map & mask &&
1965                     !(hdev->tm_info.hw_pfc_map & mask)) {
1966                         /* Clear the no pfc TC private buffer */
1967                         priv->wl.low = 0;
1968                         priv->wl.high = 0;
1969                         priv->buf_size = 0;
1970                         priv->enable = 0;
1971                         no_pfc_priv_num--;
1972                 }
1973
1974                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1975                     no_pfc_priv_num == 0)
1976                         break;
1977         }
1978
1979         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1980 }
1981
1982 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1983                                         struct hclge_pkt_buf_alloc *buf_alloc)
1984 {
1985         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1986         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1987         int i;
1988
1989         /* let the last to be cleared first */
1990         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1991                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1992                 unsigned int mask = BIT((unsigned int)i);
1993
1994                 if (hdev->hw_tc_map & mask &&
1995                     hdev->tm_info.hw_pfc_map & mask) {
1996                         /* Reduce the number of pfc TC with private buffer */
1997                         priv->wl.low = 0;
1998                         priv->enable = 0;
1999                         priv->wl.high = 0;
2000                         priv->buf_size = 0;
2001                         pfc_priv_num--;
2002                 }
2003
2004                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2005                     pfc_priv_num == 0)
2006                         break;
2007         }
2008
2009         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2010 }
2011
2012 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2013                                       struct hclge_pkt_buf_alloc *buf_alloc)
2014 {
2015 #define COMPENSATE_BUFFER       0x3C00
2016 #define COMPENSATE_HALF_MPS_NUM 5
2017 #define PRIV_WL_GAP             0x1800
2018
2019         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2020         u32 tc_num = hclge_get_tc_num(hdev);
2021         u32 half_mps = hdev->mps >> 1;
2022         u32 min_rx_priv;
2023         unsigned int i;
2024
2025         if (tc_num)
2026                 rx_priv = rx_priv / tc_num;
2027
2028         if (tc_num <= NEED_RESERVE_TC_NUM)
2029                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2030
2031         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2032                         COMPENSATE_HALF_MPS_NUM * half_mps;
2033         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2034         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2035
2036         if (rx_priv < min_rx_priv)
2037                 return false;
2038
2039         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2041
2042                 priv->enable = 0;
2043                 priv->wl.low = 0;
2044                 priv->wl.high = 0;
2045                 priv->buf_size = 0;
2046
2047                 if (!(hdev->hw_tc_map & BIT(i)))
2048                         continue;
2049
2050                 priv->enable = 1;
2051                 priv->buf_size = rx_priv;
2052                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2053                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2054         }
2055
2056         buf_alloc->s_buf.buf_size = 0;
2057
2058         return true;
2059 }
2060
2061 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2062  * @hdev: pointer to struct hclge_dev
2063  * @buf_alloc: pointer to buffer calculation data
2064  * @return: 0: calculate sucessful, negative: fail
2065  */
2066 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2067                                 struct hclge_pkt_buf_alloc *buf_alloc)
2068 {
2069         /* When DCB is not supported, rx private buffer is not allocated. */
2070         if (!hnae3_dev_dcb_supported(hdev)) {
2071                 u32 rx_all = hdev->pkt_buf_size;
2072
2073                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2074                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2075                         return -ENOMEM;
2076
2077                 return 0;
2078         }
2079
2080         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2081                 return 0;
2082
2083         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2084                 return 0;
2085
2086         /* try to decrease the buffer size */
2087         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2088                 return 0;
2089
2090         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2091                 return 0;
2092
2093         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2094                 return 0;
2095
2096         return -ENOMEM;
2097 }
2098
2099 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2100                                    struct hclge_pkt_buf_alloc *buf_alloc)
2101 {
2102         struct hclge_rx_priv_buff_cmd *req;
2103         struct hclge_desc desc;
2104         int ret;
2105         int i;
2106
2107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2108         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2109
2110         /* Alloc private buffer TCs */
2111         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2112                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2113
2114                 req->buf_num[i] =
2115                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2116                 req->buf_num[i] |=
2117                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2118         }
2119
2120         req->shared_buf =
2121                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2122                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2123
2124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2125         if (ret)
2126                 dev_err(&hdev->pdev->dev,
2127                         "rx private buffer alloc cmd failed %d\n", ret);
2128
2129         return ret;
2130 }
2131
2132 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2133                                    struct hclge_pkt_buf_alloc *buf_alloc)
2134 {
2135         struct hclge_rx_priv_wl_buf *req;
2136         struct hclge_priv_buf *priv;
2137         struct hclge_desc desc[2];
2138         int i, j;
2139         int ret;
2140
2141         for (i = 0; i < 2; i++) {
2142                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2143                                            false);
2144                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2145
2146                 /* The first descriptor set the NEXT bit to 1 */
2147                 if (i == 0)
2148                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149                 else
2150                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2151
2152                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2153                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2154
2155                         priv = &buf_alloc->priv_buf[idx];
2156                         req->tc_wl[j].high =
2157                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2158                         req->tc_wl[j].high |=
2159                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2160                         req->tc_wl[j].low =
2161                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2162                         req->tc_wl[j].low |=
2163                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2164                 }
2165         }
2166
2167         /* Send 2 descriptor at one time */
2168         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2169         if (ret)
2170                 dev_err(&hdev->pdev->dev,
2171                         "rx private waterline config cmd failed %d\n",
2172                         ret);
2173         return ret;
2174 }
2175
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177                                     struct hclge_pkt_buf_alloc *buf_alloc)
2178 {
2179         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180         struct hclge_rx_com_thrd *req;
2181         struct hclge_desc desc[2];
2182         struct hclge_tc_thrd *tc;
2183         int i, j;
2184         int ret;
2185
2186         for (i = 0; i < 2; i++) {
2187                 hclge_cmd_setup_basic_desc(&desc[i],
2188                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2190
2191                 /* The first descriptor set the NEXT bit to 1 */
2192                 if (i == 0)
2193                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194                 else
2195                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2196
2197                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2199
2200                         req->com_thrd[j].high =
2201                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202                         req->com_thrd[j].high |=
2203                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204                         req->com_thrd[j].low =
2205                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206                         req->com_thrd[j].low |=
2207                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2208                 }
2209         }
2210
2211         /* Send 2 descriptors at one time */
2212         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2213         if (ret)
2214                 dev_err(&hdev->pdev->dev,
2215                         "common threshold config cmd failed %d\n", ret);
2216         return ret;
2217 }
2218
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220                                   struct hclge_pkt_buf_alloc *buf_alloc)
2221 {
2222         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223         struct hclge_rx_com_wl *req;
2224         struct hclge_desc desc;
2225         int ret;
2226
2227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2228
2229         req = (struct hclge_rx_com_wl *)desc.data;
2230         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232
2233         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2235
2236         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2237         if (ret)
2238                 dev_err(&hdev->pdev->dev,
2239                         "common waterline config cmd failed %d\n", ret);
2240
2241         return ret;
2242 }
2243
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2245 {
2246         struct hclge_pkt_buf_alloc *pkt_buf;
2247         int ret;
2248
2249         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2250         if (!pkt_buf)
2251                 return -ENOMEM;
2252
2253         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2254         if (ret) {
2255                 dev_err(&hdev->pdev->dev,
2256                         "could not calc tx buffer size for all TCs %d\n", ret);
2257                 goto out;
2258         }
2259
2260         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2261         if (ret) {
2262                 dev_err(&hdev->pdev->dev,
2263                         "could not alloc tx buffers %d\n", ret);
2264                 goto out;
2265         }
2266
2267         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2268         if (ret) {
2269                 dev_err(&hdev->pdev->dev,
2270                         "could not calc rx priv buffer size for all TCs %d\n",
2271                         ret);
2272                 goto out;
2273         }
2274
2275         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2276         if (ret) {
2277                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2278                         ret);
2279                 goto out;
2280         }
2281
2282         if (hnae3_dev_dcb_supported(hdev)) {
2283                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2284                 if (ret) {
2285                         dev_err(&hdev->pdev->dev,
2286                                 "could not configure rx private waterline %d\n",
2287                                 ret);
2288                         goto out;
2289                 }
2290
2291                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2292                 if (ret) {
2293                         dev_err(&hdev->pdev->dev,
2294                                 "could not configure common threshold %d\n",
2295                                 ret);
2296                         goto out;
2297                 }
2298         }
2299
2300         ret = hclge_common_wl_config(hdev, pkt_buf);
2301         if (ret)
2302                 dev_err(&hdev->pdev->dev,
2303                         "could not configure common waterline %d\n", ret);
2304
2305 out:
2306         kfree(pkt_buf);
2307         return ret;
2308 }
2309
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2311 {
2312         struct hnae3_handle *roce = &vport->roce;
2313         struct hnae3_handle *nic = &vport->nic;
2314
2315         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2316
2317         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318             vport->back->num_msi_left == 0)
2319                 return -EINVAL;
2320
2321         roce->rinfo.base_vector = vport->back->roce_base_vector;
2322
2323         roce->rinfo.netdev = nic->kinfo.netdev;
2324         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2325
2326         roce->pdev = nic->pdev;
2327         roce->ae_algo = nic->ae_algo;
2328         roce->numa_node_mask = nic->numa_node_mask;
2329
2330         return 0;
2331 }
2332
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2334 {
2335         struct pci_dev *pdev = hdev->pdev;
2336         int vectors;
2337         int i;
2338
2339         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2340                                         hdev->num_msi,
2341                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2342         if (vectors < 0) {
2343                 dev_err(&pdev->dev,
2344                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2345                         vectors);
2346                 return vectors;
2347         }
2348         if (vectors < hdev->num_msi)
2349                 dev_warn(&hdev->pdev->dev,
2350                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351                          hdev->num_msi, vectors);
2352
2353         hdev->num_msi = vectors;
2354         hdev->num_msi_left = vectors;
2355
2356         hdev->base_msi_vector = pdev->irq;
2357         hdev->roce_base_vector = hdev->base_msi_vector +
2358                                 hdev->roce_base_msix_offset;
2359
2360         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361                                            sizeof(u16), GFP_KERNEL);
2362         if (!hdev->vector_status) {
2363                 pci_free_irq_vectors(pdev);
2364                 return -ENOMEM;
2365         }
2366
2367         for (i = 0; i < hdev->num_msi; i++)
2368                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2369
2370         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371                                         sizeof(int), GFP_KERNEL);
2372         if (!hdev->vector_irq) {
2373                 pci_free_irq_vectors(pdev);
2374                 return -ENOMEM;
2375         }
2376
2377         return 0;
2378 }
2379
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2381 {
2382         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383                 duplex = HCLGE_MAC_FULL;
2384
2385         return duplex;
2386 }
2387
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2389                                       u8 duplex)
2390 {
2391         struct hclge_config_mac_speed_dup_cmd *req;
2392         struct hclge_desc desc;
2393         int ret;
2394
2395         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2396
2397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2398
2399         if (duplex)
2400                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2401
2402         switch (speed) {
2403         case HCLGE_MAC_SPEED_10M:
2404                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405                                 HCLGE_CFG_SPEED_S, 6);
2406                 break;
2407         case HCLGE_MAC_SPEED_100M:
2408                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409                                 HCLGE_CFG_SPEED_S, 7);
2410                 break;
2411         case HCLGE_MAC_SPEED_1G:
2412                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413                                 HCLGE_CFG_SPEED_S, 0);
2414                 break;
2415         case HCLGE_MAC_SPEED_10G:
2416                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417                                 HCLGE_CFG_SPEED_S, 1);
2418                 break;
2419         case HCLGE_MAC_SPEED_25G:
2420                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421                                 HCLGE_CFG_SPEED_S, 2);
2422                 break;
2423         case HCLGE_MAC_SPEED_40G:
2424                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425                                 HCLGE_CFG_SPEED_S, 3);
2426                 break;
2427         case HCLGE_MAC_SPEED_50G:
2428                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429                                 HCLGE_CFG_SPEED_S, 4);
2430                 break;
2431         case HCLGE_MAC_SPEED_100G:
2432                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433                                 HCLGE_CFG_SPEED_S, 5);
2434                 break;
2435         default:
2436                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2437                 return -EINVAL;
2438         }
2439
2440         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2441                       1);
2442
2443         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2444         if (ret) {
2445                 dev_err(&hdev->pdev->dev,
2446                         "mac speed/duplex config cmd failed %d.\n", ret);
2447                 return ret;
2448         }
2449
2450         return 0;
2451 }
2452
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2454 {
2455         int ret;
2456
2457         duplex = hclge_check_speed_dup(duplex, speed);
2458         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2459                 return 0;
2460
2461         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2462         if (ret)
2463                 return ret;
2464
2465         hdev->hw.mac.speed = speed;
2466         hdev->hw.mac.duplex = duplex;
2467
2468         return 0;
2469 }
2470
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2472                                      u8 duplex)
2473 {
2474         struct hclge_vport *vport = hclge_get_vport(handle);
2475         struct hclge_dev *hdev = vport->back;
2476
2477         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2478 }
2479
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2481 {
2482         struct hclge_config_auto_neg_cmd *req;
2483         struct hclge_desc desc;
2484         u32 flag = 0;
2485         int ret;
2486
2487         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2488
2489         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2490         if (enable)
2491                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2493
2494         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2495         if (ret)
2496                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2497                         ret);
2498
2499         return ret;
2500 }
2501
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2503 {
2504         struct hclge_vport *vport = hclge_get_vport(handle);
2505         struct hclge_dev *hdev = vport->back;
2506
2507         if (!hdev->hw.mac.support_autoneg) {
2508                 if (enable) {
2509                         dev_err(&hdev->pdev->dev,
2510                                 "autoneg is not supported by current port\n");
2511                         return -EOPNOTSUPP;
2512                 } else {
2513                         return 0;
2514                 }
2515         }
2516
2517         return hclge_set_autoneg_en(hdev, enable);
2518 }
2519
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2521 {
2522         struct hclge_vport *vport = hclge_get_vport(handle);
2523         struct hclge_dev *hdev = vport->back;
2524         struct phy_device *phydev = hdev->hw.mac.phydev;
2525
2526         if (phydev)
2527                 return phydev->autoneg;
2528
2529         return hdev->hw.mac.autoneg;
2530 }
2531
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2533 {
2534         struct hclge_vport *vport = hclge_get_vport(handle);
2535         struct hclge_dev *hdev = vport->back;
2536         int ret;
2537
2538         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2539
2540         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541         if (ret)
2542                 return ret;
2543         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544 }
2545
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2547 {
2548         struct hclge_vport *vport = hclge_get_vport(handle);
2549         struct hclge_dev *hdev = vport->back;
2550
2551         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552                 return hclge_set_autoneg_en(hdev, !halt);
2553
2554         return 0;
2555 }
2556
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2558 {
2559         struct hclge_config_fec_cmd *req;
2560         struct hclge_desc desc;
2561         int ret;
2562
2563         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2564
2565         req = (struct hclge_config_fec_cmd *)desc.data;
2566         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568         if (fec_mode & BIT(HNAE3_FEC_RS))
2569                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571         if (fec_mode & BIT(HNAE3_FEC_BASER))
2572                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2574
2575         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576         if (ret)
2577                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2578
2579         return ret;
2580 }
2581
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2583 {
2584         struct hclge_vport *vport = hclge_get_vport(handle);
2585         struct hclge_dev *hdev = vport->back;
2586         struct hclge_mac *mac = &hdev->hw.mac;
2587         int ret;
2588
2589         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2591                 return -EINVAL;
2592         }
2593
2594         ret = hclge_set_fec_hw(hdev, fec_mode);
2595         if (ret)
2596                 return ret;
2597
2598         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2599         return 0;
2600 }
2601
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603                           u8 *fec_mode)
2604 {
2605         struct hclge_vport *vport = hclge_get_vport(handle);
2606         struct hclge_dev *hdev = vport->back;
2607         struct hclge_mac *mac = &hdev->hw.mac;
2608
2609         if (fec_ability)
2610                 *fec_ability = mac->fec_ability;
2611         if (fec_mode)
2612                 *fec_mode = mac->fec_mode;
2613 }
2614
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2616 {
2617         struct hclge_mac *mac = &hdev->hw.mac;
2618         int ret;
2619
2620         hdev->support_sfp_query = true;
2621         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623                                          hdev->hw.mac.duplex);
2624         if (ret) {
2625                 dev_err(&hdev->pdev->dev,
2626                         "Config mac speed dup fail ret=%d\n", ret);
2627                 return ret;
2628         }
2629
2630         if (hdev->hw.mac.support_autoneg) {
2631                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2632                 if (ret) {
2633                         dev_err(&hdev->pdev->dev,
2634                                 "Config mac autoneg fail ret=%d\n", ret);
2635                         return ret;
2636                 }
2637         }
2638
2639         mac->link = 0;
2640
2641         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2643                 if (ret) {
2644                         dev_err(&hdev->pdev->dev,
2645                                 "Fec mode init fail, ret = %d\n", ret);
2646                         return ret;
2647                 }
2648         }
2649
2650         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2651         if (ret) {
2652                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2653                 return ret;
2654         }
2655
2656         ret = hclge_set_default_loopback(hdev);
2657         if (ret)
2658                 return ret;
2659
2660         ret = hclge_buffer_alloc(hdev);
2661         if (ret)
2662                 dev_err(&hdev->pdev->dev,
2663                         "allocate buffer fail, ret=%d\n", ret);
2664
2665         return ret;
2666 }
2667
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2669 {
2670         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673                                     hclge_wq, &hdev->service_task, 0);
2674 }
2675
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2677 {
2678         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681                                     hclge_wq, &hdev->service_task, 0);
2682 }
2683
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2685 {
2686         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2687             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2688                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2689                                     hclge_wq, &hdev->service_task,
2690                                     delay_time);
2691 }
2692
2693 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2694 {
2695         struct hclge_link_status_cmd *req;
2696         struct hclge_desc desc;
2697         int link_status;
2698         int ret;
2699
2700         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2701         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2702         if (ret) {
2703                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2704                         ret);
2705                 return ret;
2706         }
2707
2708         req = (struct hclge_link_status_cmd *)desc.data;
2709         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2710
2711         return !!link_status;
2712 }
2713
2714 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2715 {
2716         unsigned int mac_state;
2717         int link_stat;
2718
2719         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2720                 return 0;
2721
2722         mac_state = hclge_get_mac_link_status(hdev);
2723
2724         if (hdev->hw.mac.phydev) {
2725                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2726                         link_stat = mac_state &
2727                                 hdev->hw.mac.phydev->link;
2728                 else
2729                         link_stat = 0;
2730
2731         } else {
2732                 link_stat = mac_state;
2733         }
2734
2735         return !!link_stat;
2736 }
2737
2738 static void hclge_update_link_status(struct hclge_dev *hdev)
2739 {
2740         struct hnae3_client *rclient = hdev->roce_client;
2741         struct hnae3_client *client = hdev->nic_client;
2742         struct hnae3_handle *rhandle;
2743         struct hnae3_handle *handle;
2744         int state;
2745         int i;
2746
2747         if (!client)
2748                 return;
2749
2750         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2751                 return;
2752
2753         state = hclge_get_mac_phy_link(hdev);
2754         if (state != hdev->hw.mac.link) {
2755                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2756                         handle = &hdev->vport[i].nic;
2757                         client->ops->link_status_change(handle, state);
2758                         hclge_config_mac_tnl_int(hdev, state);
2759                         rhandle = &hdev->vport[i].roce;
2760                         if (rclient && rclient->ops->link_status_change)
2761                                 rclient->ops->link_status_change(rhandle,
2762                                                                  state);
2763                 }
2764                 hdev->hw.mac.link = state;
2765         }
2766
2767         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2768 }
2769
2770 static void hclge_update_port_capability(struct hclge_mac *mac)
2771 {
2772         /* update fec ability by speed */
2773         hclge_convert_setting_fec(mac);
2774
2775         /* firmware can not identify back plane type, the media type
2776          * read from configuration can help deal it
2777          */
2778         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2779             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2780                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2781         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2782                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2783
2784         if (mac->support_autoneg) {
2785                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2786                 linkmode_copy(mac->advertising, mac->supported);
2787         } else {
2788                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2789                                    mac->supported);
2790                 linkmode_zero(mac->advertising);
2791         }
2792 }
2793
2794 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2795 {
2796         struct hclge_sfp_info_cmd *resp;
2797         struct hclge_desc desc;
2798         int ret;
2799
2800         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2801         resp = (struct hclge_sfp_info_cmd *)desc.data;
2802         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2803         if (ret == -EOPNOTSUPP) {
2804                 dev_warn(&hdev->pdev->dev,
2805                          "IMP do not support get SFP speed %d\n", ret);
2806                 return ret;
2807         } else if (ret) {
2808                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2809                 return ret;
2810         }
2811
2812         *speed = le32_to_cpu(resp->speed);
2813
2814         return 0;
2815 }
2816
2817 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2818 {
2819         struct hclge_sfp_info_cmd *resp;
2820         struct hclge_desc desc;
2821         int ret;
2822
2823         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2824         resp = (struct hclge_sfp_info_cmd *)desc.data;
2825
2826         resp->query_type = QUERY_ACTIVE_SPEED;
2827
2828         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2829         if (ret == -EOPNOTSUPP) {
2830                 dev_warn(&hdev->pdev->dev,
2831                          "IMP does not support get SFP info %d\n", ret);
2832                 return ret;
2833         } else if (ret) {
2834                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2835                 return ret;
2836         }
2837
2838         /* In some case, mac speed get from IMP may be 0, it shouldn't be
2839          * set to mac->speed.
2840          */
2841         if (!le32_to_cpu(resp->speed))
2842                 return 0;
2843
2844         mac->speed = le32_to_cpu(resp->speed);
2845         /* if resp->speed_ability is 0, it means it's an old version
2846          * firmware, do not update these params
2847          */
2848         if (resp->speed_ability) {
2849                 mac->module_type = le32_to_cpu(resp->module_type);
2850                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2851                 mac->autoneg = resp->autoneg;
2852                 mac->support_autoneg = resp->autoneg_ability;
2853                 mac->speed_type = QUERY_ACTIVE_SPEED;
2854                 if (!resp->active_fec)
2855                         mac->fec_mode = 0;
2856                 else
2857                         mac->fec_mode = BIT(resp->active_fec);
2858         } else {
2859                 mac->speed_type = QUERY_SFP_SPEED;
2860         }
2861
2862         return 0;
2863 }
2864
2865 static int hclge_update_port_info(struct hclge_dev *hdev)
2866 {
2867         struct hclge_mac *mac = &hdev->hw.mac;
2868         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2869         int ret;
2870
2871         /* get the port info from SFP cmd if not copper port */
2872         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2873                 return 0;
2874
2875         /* if IMP does not support get SFP/qSFP info, return directly */
2876         if (!hdev->support_sfp_query)
2877                 return 0;
2878
2879         if (hdev->pdev->revision >= 0x21)
2880                 ret = hclge_get_sfp_info(hdev, mac);
2881         else
2882                 ret = hclge_get_sfp_speed(hdev, &speed);
2883
2884         if (ret == -EOPNOTSUPP) {
2885                 hdev->support_sfp_query = false;
2886                 return ret;
2887         } else if (ret) {
2888                 return ret;
2889         }
2890
2891         if (hdev->pdev->revision >= 0x21) {
2892                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2893                         hclge_update_port_capability(mac);
2894                         return 0;
2895                 }
2896                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2897                                                HCLGE_MAC_FULL);
2898         } else {
2899                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2900                         return 0; /* do nothing if no SFP */
2901
2902                 /* must config full duplex for SFP */
2903                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2904         }
2905 }
2906
2907 static int hclge_get_status(struct hnae3_handle *handle)
2908 {
2909         struct hclge_vport *vport = hclge_get_vport(handle);
2910         struct hclge_dev *hdev = vport->back;
2911
2912         hclge_update_link_status(hdev);
2913
2914         return hdev->hw.mac.link;
2915 }
2916
2917 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2918 {
2919         if (pci_num_vf(hdev->pdev) == 0) {
2920                 dev_err(&hdev->pdev->dev,
2921                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2922                 return NULL;
2923         }
2924
2925         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2926                 dev_err(&hdev->pdev->dev,
2927                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2928                         vf, pci_num_vf(hdev->pdev));
2929                 return NULL;
2930         }
2931
2932         /* VF start from 1 in vport */
2933         vf += HCLGE_VF_VPORT_START_NUM;
2934         return &hdev->vport[vf];
2935 }
2936
2937 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2938                                struct ifla_vf_info *ivf)
2939 {
2940         struct hclge_vport *vport = hclge_get_vport(handle);
2941         struct hclge_dev *hdev = vport->back;
2942
2943         vport = hclge_get_vf_vport(hdev, vf);
2944         if (!vport)
2945                 return -EINVAL;
2946
2947         ivf->vf = vf;
2948         ivf->linkstate = vport->vf_info.link_state;
2949         ivf->spoofchk = vport->vf_info.spoofchk;
2950         ivf->trusted = vport->vf_info.trusted;
2951         ivf->min_tx_rate = 0;
2952         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2953         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2954         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2955         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2956         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2957
2958         return 0;
2959 }
2960
2961 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2962                                    int link_state)
2963 {
2964         struct hclge_vport *vport = hclge_get_vport(handle);
2965         struct hclge_dev *hdev = vport->back;
2966
2967         vport = hclge_get_vf_vport(hdev, vf);
2968         if (!vport)
2969                 return -EINVAL;
2970
2971         vport->vf_info.link_state = link_state;
2972
2973         return 0;
2974 }
2975
2976 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2977 {
2978         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2979
2980         /* fetch the events from their corresponding regs */
2981         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2982         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2983         msix_src_reg = hclge_read_dev(&hdev->hw,
2984                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2985
2986         /* Assumption: If by any chance reset and mailbox events are reported
2987          * together then we will only process reset event in this go and will
2988          * defer the processing of the mailbox events. Since, we would have not
2989          * cleared RX CMDQ event this time we would receive again another
2990          * interrupt from H/W just for the mailbox.
2991          *
2992          * check for vector0 reset event sources
2993          */
2994         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2995                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2996                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2997                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2998                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2999                 hdev->rst_stats.imp_rst_cnt++;
3000                 return HCLGE_VECTOR0_EVENT_RST;
3001         }
3002
3003         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
3004                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3005                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3006                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3007                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3008                 hdev->rst_stats.global_rst_cnt++;
3009                 return HCLGE_VECTOR0_EVENT_RST;
3010         }
3011
3012         /* check for vector0 msix event source */
3013         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3014                 *clearval = msix_src_reg;
3015                 return HCLGE_VECTOR0_EVENT_ERR;
3016         }
3017
3018         /* check for vector0 mailbox(=CMDQ RX) event source */
3019         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3020                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3021                 *clearval = cmdq_src_reg;
3022                 return HCLGE_VECTOR0_EVENT_MBX;
3023         }
3024
3025         /* print other vector0 event source */
3026         dev_info(&hdev->pdev->dev,
3027                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3028                  cmdq_src_reg, msix_src_reg);
3029         *clearval = msix_src_reg;
3030
3031         return HCLGE_VECTOR0_EVENT_OTHER;
3032 }
3033
3034 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3035                                     u32 regclr)
3036 {
3037         switch (event_type) {
3038         case HCLGE_VECTOR0_EVENT_RST:
3039                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3040                 break;
3041         case HCLGE_VECTOR0_EVENT_MBX:
3042                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3043                 break;
3044         default:
3045                 break;
3046         }
3047 }
3048
3049 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3050 {
3051         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3052                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3053                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3054                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3055         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3056 }
3057
3058 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3059 {
3060         writel(enable ? 1 : 0, vector->addr);
3061 }
3062
3063 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3064 {
3065         struct hclge_dev *hdev = data;
3066         u32 clearval = 0;
3067         u32 event_cause;
3068
3069         hclge_enable_vector(&hdev->misc_vector, false);
3070         event_cause = hclge_check_event_cause(hdev, &clearval);
3071
3072         /* vector 0 interrupt is shared with reset and mailbox source events.*/
3073         switch (event_cause) {
3074         case HCLGE_VECTOR0_EVENT_ERR:
3075                 /* we do not know what type of reset is required now. This could
3076                  * only be decided after we fetch the type of errors which
3077                  * caused this event. Therefore, we will do below for now:
3078                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3079                  *    have defered type of reset to be used.
3080                  * 2. Schedule the reset serivce task.
3081                  * 3. When service task receives  HNAE3_UNKNOWN_RESET type it
3082                  *    will fetch the correct type of reset.  This would be done
3083                  *    by first decoding the types of errors.
3084                  */
3085                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3086                 /* fall through */
3087         case HCLGE_VECTOR0_EVENT_RST:
3088                 hclge_reset_task_schedule(hdev);
3089                 break;
3090         case HCLGE_VECTOR0_EVENT_MBX:
3091                 /* If we are here then,
3092                  * 1. Either we are not handling any mbx task and we are not
3093                  *    scheduled as well
3094                  *                        OR
3095                  * 2. We could be handling a mbx task but nothing more is
3096                  *    scheduled.
3097                  * In both cases, we should schedule mbx task as there are more
3098                  * mbx messages reported by this interrupt.
3099                  */
3100                 hclge_mbx_task_schedule(hdev);
3101                 break;
3102         default:
3103                 dev_warn(&hdev->pdev->dev,
3104                          "received unknown or unhandled event of vector0\n");
3105                 break;
3106         }
3107
3108         hclge_clear_event_cause(hdev, event_cause, clearval);
3109
3110         /* Enable interrupt if it is not cause by reset. And when
3111          * clearval equal to 0, it means interrupt status may be
3112          * cleared by hardware before driver reads status register.
3113          * For this case, vector0 interrupt also should be enabled.
3114          */
3115         if (!clearval ||
3116             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3117                 hclge_enable_vector(&hdev->misc_vector, true);
3118         }
3119
3120         return IRQ_HANDLED;
3121 }
3122
3123 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3124 {
3125         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3126                 dev_warn(&hdev->pdev->dev,
3127                          "vector(vector_id %d) has been freed.\n", vector_id);
3128                 return;
3129         }
3130
3131         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3132         hdev->num_msi_left += 1;
3133         hdev->num_msi_used -= 1;
3134 }
3135
3136 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3137 {
3138         struct hclge_misc_vector *vector = &hdev->misc_vector;
3139
3140         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3141
3142         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3143         hdev->vector_status[0] = 0;
3144
3145         hdev->num_msi_left -= 1;
3146         hdev->num_msi_used += 1;
3147 }
3148
3149 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3150                                       const cpumask_t *mask)
3151 {
3152         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3153                                               affinity_notify);
3154
3155         cpumask_copy(&hdev->affinity_mask, mask);
3156 }
3157
3158 static void hclge_irq_affinity_release(struct kref *ref)
3159 {
3160 }
3161
3162 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3163 {
3164         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3165                               &hdev->affinity_mask);
3166
3167         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3168         hdev->affinity_notify.release = hclge_irq_affinity_release;
3169         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3170                                   &hdev->affinity_notify);
3171 }
3172
3173 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3174 {
3175         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3176         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3177 }
3178
3179 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3180 {
3181         int ret;
3182
3183         hclge_get_misc_vector(hdev);
3184
3185         /* this would be explicitly freed in the end */
3186         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3187                  HCLGE_NAME, pci_name(hdev->pdev));
3188         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3189                           0, hdev->misc_vector.name, hdev);
3190         if (ret) {
3191                 hclge_free_vector(hdev, 0);
3192                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3193                         hdev->misc_vector.vector_irq);
3194         }
3195
3196         return ret;
3197 }
3198
3199 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3200 {
3201         free_irq(hdev->misc_vector.vector_irq, hdev);
3202         hclge_free_vector(hdev, 0);
3203 }
3204
3205 int hclge_notify_client(struct hclge_dev *hdev,
3206                         enum hnae3_reset_notify_type type)
3207 {
3208         struct hnae3_client *client = hdev->nic_client;
3209         u16 i;
3210
3211         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3212                 return 0;
3213
3214         if (!client->ops->reset_notify)
3215                 return -EOPNOTSUPP;
3216
3217         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3218                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3219                 int ret;
3220
3221                 ret = client->ops->reset_notify(handle, type);
3222                 if (ret) {
3223                         dev_err(&hdev->pdev->dev,
3224                                 "notify nic client failed %d(%d)\n", type, ret);
3225                         return ret;
3226                 }
3227         }
3228
3229         return 0;
3230 }
3231
3232 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3233                                     enum hnae3_reset_notify_type type)
3234 {
3235         struct hnae3_client *client = hdev->roce_client;
3236         int ret = 0;
3237         u16 i;
3238
3239         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3240                 return 0;
3241
3242         if (!client->ops->reset_notify)
3243                 return -EOPNOTSUPP;
3244
3245         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3246                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3247
3248                 ret = client->ops->reset_notify(handle, type);
3249                 if (ret) {
3250                         dev_err(&hdev->pdev->dev,
3251                                 "notify roce client failed %d(%d)",
3252                                 type, ret);
3253                         return ret;
3254                 }
3255         }
3256
3257         return ret;
3258 }
3259
3260 static int hclge_reset_wait(struct hclge_dev *hdev)
3261 {
3262 #define HCLGE_RESET_WATI_MS     100
3263 #define HCLGE_RESET_WAIT_CNT    350
3264
3265         u32 val, reg, reg_bit;
3266         u32 cnt = 0;
3267
3268         switch (hdev->reset_type) {
3269         case HNAE3_IMP_RESET:
3270                 reg = HCLGE_GLOBAL_RESET_REG;
3271                 reg_bit = HCLGE_IMP_RESET_BIT;
3272                 break;
3273         case HNAE3_GLOBAL_RESET:
3274                 reg = HCLGE_GLOBAL_RESET_REG;
3275                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3276                 break;
3277         case HNAE3_FUNC_RESET:
3278                 reg = HCLGE_FUN_RST_ING;
3279                 reg_bit = HCLGE_FUN_RST_ING_B;
3280                 break;
3281         default:
3282                 dev_err(&hdev->pdev->dev,
3283                         "Wait for unsupported reset type: %d\n",
3284                         hdev->reset_type);
3285                 return -EINVAL;
3286         }
3287
3288         val = hclge_read_dev(&hdev->hw, reg);
3289         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3290                 msleep(HCLGE_RESET_WATI_MS);
3291                 val = hclge_read_dev(&hdev->hw, reg);
3292                 cnt++;
3293         }
3294
3295         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3296                 dev_warn(&hdev->pdev->dev,
3297                          "Wait for reset timeout: %d\n", hdev->reset_type);
3298                 return -EBUSY;
3299         }
3300
3301         return 0;
3302 }
3303
3304 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3305 {
3306         struct hclge_vf_rst_cmd *req;
3307         struct hclge_desc desc;
3308
3309         req = (struct hclge_vf_rst_cmd *)desc.data;
3310         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3311         req->dest_vfid = func_id;
3312
3313         if (reset)
3314                 req->vf_rst = 0x1;
3315
3316         return hclge_cmd_send(&hdev->hw, &desc, 1);
3317 }
3318
3319 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3320 {
3321         int i;
3322
3323         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3324                 struct hclge_vport *vport = &hdev->vport[i];
3325                 int ret;
3326
3327                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3328                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3329                 if (ret) {
3330                         dev_err(&hdev->pdev->dev,
3331                                 "set vf(%u) rst failed %d!\n",
3332                                 vport->vport_id, ret);
3333                         return ret;
3334                 }
3335
3336                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3337                         continue;
3338
3339                 /* Inform VF to process the reset.
3340                  * hclge_inform_reset_assert_to_vf may fail if VF
3341                  * driver is not loaded.
3342                  */
3343                 ret = hclge_inform_reset_assert_to_vf(vport);
3344                 if (ret)
3345                         dev_warn(&hdev->pdev->dev,
3346                                  "inform reset to vf(%u) failed %d!\n",
3347                                  vport->vport_id, ret);
3348         }
3349
3350         return 0;
3351 }
3352
3353 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3354 {
3355         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3356             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3357             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3358                 return;
3359
3360         hclge_mbx_handler(hdev);
3361
3362         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3363 }
3364
3365 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3366 {
3367         struct hclge_pf_rst_sync_cmd *req;
3368         struct hclge_desc desc;
3369         int cnt = 0;
3370         int ret;
3371
3372         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3373         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3374
3375         do {
3376                 /* vf need to down netdev by mbx during PF or FLR reset */
3377                 hclge_mailbox_service_task(hdev);
3378
3379                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3380                 /* for compatible with old firmware, wait
3381                  * 100 ms for VF to stop IO
3382                  */
3383                 if (ret == -EOPNOTSUPP) {
3384                         msleep(HCLGE_RESET_SYNC_TIME);
3385                         return 0;
3386                 } else if (ret) {
3387                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3388                                 ret);
3389                         return ret;
3390                 } else if (req->all_vf_ready) {
3391                         return 0;
3392                 }
3393                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3394                 hclge_cmd_reuse_desc(&desc, true);
3395         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3396
3397         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3398         return -ETIME;
3399 }
3400
3401 void hclge_report_hw_error(struct hclge_dev *hdev,
3402                            enum hnae3_hw_error_type type)
3403 {
3404         struct hnae3_client *client = hdev->nic_client;
3405         u16 i;
3406
3407         if (!client || !client->ops->process_hw_error ||
3408             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3409                 return;
3410
3411         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3412                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3413 }
3414
3415 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3416 {
3417         u32 reg_val;
3418
3419         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3420         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3421                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3422                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3423                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3424         }
3425
3426         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3427                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3428                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3429                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3430         }
3431 }
3432
3433 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3434 {
3435         struct hclge_desc desc;
3436         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3437         int ret;
3438
3439         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3440         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3441         req->fun_reset_vfid = func_id;
3442
3443         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3444         if (ret)
3445                 dev_err(&hdev->pdev->dev,
3446                         "send function reset cmd fail, status =%d\n", ret);
3447
3448         return ret;
3449 }
3450
3451 static void hclge_do_reset(struct hclge_dev *hdev)
3452 {
3453         struct hnae3_handle *handle = &hdev->vport[0].nic;
3454         struct pci_dev *pdev = hdev->pdev;
3455         u32 val;
3456
3457         if (hclge_get_hw_reset_stat(handle)) {
3458                 dev_info(&pdev->dev, "Hardware reset not finish\n");
3459                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3460                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3461                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3462                 return;
3463         }
3464
3465         switch (hdev->reset_type) {
3466         case HNAE3_GLOBAL_RESET:
3467                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3468                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3469                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3470                 dev_info(&pdev->dev, "Global Reset requested\n");
3471                 break;
3472         case HNAE3_FUNC_RESET:
3473                 dev_info(&pdev->dev, "PF Reset requested\n");
3474                 /* schedule again to check later */
3475                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3476                 hclge_reset_task_schedule(hdev);
3477                 break;
3478         default:
3479                 dev_warn(&pdev->dev,
3480                          "Unsupported reset type: %d\n", hdev->reset_type);
3481                 break;
3482         }
3483 }
3484
3485 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3486                                                    unsigned long *addr)
3487 {
3488         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3489         struct hclge_dev *hdev = ae_dev->priv;
3490
3491         /* first, resolve any unknown reset type to the known type(s) */
3492         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3493                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3494                                         HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3495                 /* we will intentionally ignore any errors from this function
3496                  *  as we will end up in *some* reset request in any case
3497                  */
3498                 if (hclge_handle_hw_msix_error(hdev, addr))
3499                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3500                                  msix_sts_reg);
3501
3502                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3503                 /* We defered the clearing of the error event which caused
3504                  * interrupt since it was not posssible to do that in
3505                  * interrupt context (and this is the reason we introduced
3506                  * new UNKNOWN reset type). Now, the errors have been
3507                  * handled and cleared in hardware we can safely enable
3508                  * interrupts. This is an exception to the norm.
3509                  */
3510                 hclge_enable_vector(&hdev->misc_vector, true);
3511         }
3512
3513         /* return the highest priority reset level amongst all */
3514         if (test_bit(HNAE3_IMP_RESET, addr)) {
3515                 rst_level = HNAE3_IMP_RESET;
3516                 clear_bit(HNAE3_IMP_RESET, addr);
3517                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3518                 clear_bit(HNAE3_FUNC_RESET, addr);
3519         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3520                 rst_level = HNAE3_GLOBAL_RESET;
3521                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3522                 clear_bit(HNAE3_FUNC_RESET, addr);
3523         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3524                 rst_level = HNAE3_FUNC_RESET;
3525                 clear_bit(HNAE3_FUNC_RESET, addr);
3526         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3527                 rst_level = HNAE3_FLR_RESET;
3528                 clear_bit(HNAE3_FLR_RESET, addr);
3529         }
3530
3531         if (hdev->reset_type != HNAE3_NONE_RESET &&
3532             rst_level < hdev->reset_type)
3533                 return HNAE3_NONE_RESET;
3534
3535         return rst_level;
3536 }
3537
3538 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3539 {
3540         u32 clearval = 0;
3541
3542         switch (hdev->reset_type) {
3543         case HNAE3_IMP_RESET:
3544                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3545                 break;
3546         case HNAE3_GLOBAL_RESET:
3547                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3548                 break;
3549         default:
3550                 break;
3551         }
3552
3553         if (!clearval)
3554                 return;
3555
3556         /* For revision 0x20, the reset interrupt source
3557          * can only be cleared after hardware reset done
3558          */
3559         if (hdev->pdev->revision == 0x20)
3560                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3561                                 clearval);
3562
3563         hclge_enable_vector(&hdev->misc_vector, true);
3564 }
3565
3566 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3567 {
3568         int ret = 0;
3569
3570         switch (hdev->reset_type) {
3571         case HNAE3_FUNC_RESET:
3572                 /* fall through */
3573         case HNAE3_FLR_RESET:
3574                 ret = hclge_set_all_vf_rst(hdev, true);
3575                 break;
3576         default:
3577                 break;
3578         }
3579
3580         return ret;
3581 }
3582
3583 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3584 {
3585         u32 reg_val;
3586
3587         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3588         if (enable)
3589                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3590         else
3591                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3592
3593         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3594 }
3595
3596 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3597 {
3598         u32 reg_val;
3599         int ret = 0;
3600
3601         switch (hdev->reset_type) {
3602         case HNAE3_FUNC_RESET:
3603                 /* to confirm whether all running VF is ready
3604                  * before request PF reset
3605                  */
3606                 ret = hclge_func_reset_sync_vf(hdev);
3607                 if (ret)
3608                         return ret;
3609
3610                 ret = hclge_func_reset_cmd(hdev, 0);
3611                 if (ret) {
3612                         dev_err(&hdev->pdev->dev,
3613                                 "asserting function reset fail %d!\n", ret);
3614                         return ret;
3615                 }
3616
3617                 /* After performaning pf reset, it is not necessary to do the
3618                  * mailbox handling or send any command to firmware, because
3619                  * any mailbox handling or command to firmware is only valid
3620                  * after hclge_cmd_init is called.
3621                  */
3622                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3623                 hdev->rst_stats.pf_rst_cnt++;
3624                 break;
3625         case HNAE3_FLR_RESET:
3626                 /* to confirm whether all running VF is ready
3627                  * before request PF reset
3628                  */
3629                 ret = hclge_func_reset_sync_vf(hdev);
3630                 if (ret)
3631                         return ret;
3632                 break;
3633         case HNAE3_IMP_RESET:
3634                 hclge_handle_imp_error(hdev);
3635                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3636                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3637                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3638                 break;
3639         default:
3640                 break;
3641         }
3642
3643         /* inform hardware that preparatory work is done */
3644         msleep(HCLGE_RESET_SYNC_TIME);
3645         hclge_reset_handshake(hdev, true);
3646         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3647
3648         return ret;
3649 }
3650
3651 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3652 {
3653 #define MAX_RESET_FAIL_CNT 5
3654
3655         if (hdev->reset_pending) {
3656                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3657                          hdev->reset_pending);
3658                 return true;
3659         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3660                    HCLGE_RESET_INT_M) {
3661                 dev_info(&hdev->pdev->dev,
3662                          "reset failed because new reset interrupt\n");
3663                 hclge_clear_reset_cause(hdev);
3664                 return false;
3665         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3666                 hdev->rst_stats.reset_fail_cnt++;
3667                 set_bit(hdev->reset_type, &hdev->reset_pending);
3668                 dev_info(&hdev->pdev->dev,
3669                          "re-schedule reset task(%u)\n",
3670                          hdev->rst_stats.reset_fail_cnt);
3671                 return true;
3672         }
3673
3674         hclge_clear_reset_cause(hdev);
3675
3676         /* recover the handshake status when reset fail */
3677         hclge_reset_handshake(hdev, true);
3678
3679         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3680
3681         hclge_dbg_dump_rst_info(hdev);
3682
3683         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3684
3685         return false;
3686 }
3687
3688 static int hclge_set_rst_done(struct hclge_dev *hdev)
3689 {
3690         struct hclge_pf_rst_done_cmd *req;
3691         struct hclge_desc desc;
3692         int ret;
3693
3694         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3695         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3696         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3697
3698         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3699         /* To be compatible with the old firmware, which does not support
3700          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3701          * return success
3702          */
3703         if (ret == -EOPNOTSUPP) {
3704                 dev_warn(&hdev->pdev->dev,
3705                          "current firmware does not support command(0x%x)!\n",
3706                          HCLGE_OPC_PF_RST_DONE);
3707                 return 0;
3708         } else if (ret) {
3709                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3710                         ret);
3711         }
3712
3713         return ret;
3714 }
3715
3716 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3717 {
3718         int ret = 0;
3719
3720         switch (hdev->reset_type) {
3721         case HNAE3_FUNC_RESET:
3722                 /* fall through */
3723         case HNAE3_FLR_RESET:
3724                 ret = hclge_set_all_vf_rst(hdev, false);
3725                 break;
3726         case HNAE3_GLOBAL_RESET:
3727                 /* fall through */
3728         case HNAE3_IMP_RESET:
3729                 ret = hclge_set_rst_done(hdev);
3730                 break;
3731         default:
3732                 break;
3733         }
3734
3735         /* clear up the handshake status after re-initialize done */
3736         hclge_reset_handshake(hdev, false);
3737
3738         return ret;
3739 }
3740
3741 static int hclge_reset_stack(struct hclge_dev *hdev)
3742 {
3743         int ret;
3744
3745         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3746         if (ret)
3747                 return ret;
3748
3749         ret = hclge_reset_ae_dev(hdev->ae_dev);
3750         if (ret)
3751                 return ret;
3752
3753         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3754         if (ret)
3755                 return ret;
3756
3757         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3758 }
3759
3760 static int hclge_reset_prepare(struct hclge_dev *hdev)
3761 {
3762         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3763         int ret;
3764
3765         /* Initialize ae_dev reset status as well, in case enet layer wants to
3766          * know if device is undergoing reset
3767          */
3768         ae_dev->reset_type = hdev->reset_type;
3769         hdev->rst_stats.reset_cnt++;
3770         /* perform reset of the stack & ae device for a client */
3771         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3772         if (ret)
3773                 return ret;
3774
3775         ret = hclge_reset_prepare_down(hdev);
3776         if (ret)
3777                 return ret;
3778
3779         rtnl_lock();
3780         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3781         rtnl_unlock();
3782         if (ret)
3783                 return ret;
3784
3785         return hclge_reset_prepare_wait(hdev);
3786 }
3787
3788 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3789 {
3790         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3791         enum hnae3_reset_type reset_level;
3792         int ret;
3793
3794         hdev->rst_stats.hw_reset_done_cnt++;
3795
3796         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3797         if (ret)
3798                 return ret;
3799
3800         rtnl_lock();
3801         ret = hclge_reset_stack(hdev);
3802         rtnl_unlock();
3803         if (ret)
3804                 return ret;
3805
3806         hclge_clear_reset_cause(hdev);
3807
3808         ret = hclge_reset_prepare_up(hdev);
3809         if (ret)
3810                 return ret;
3811
3812
3813         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3814         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3815          * times
3816          */
3817         if (ret &&
3818             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3819                 return ret;
3820
3821         rtnl_lock();
3822         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3823         rtnl_unlock();
3824         if (ret)
3825                 return ret;
3826
3827         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3828         if (ret)
3829                 return ret;
3830
3831         hdev->last_reset_time = jiffies;
3832         hdev->rst_stats.reset_fail_cnt = 0;
3833         hdev->rst_stats.reset_done_cnt++;
3834         ae_dev->reset_type = HNAE3_NONE_RESET;
3835         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3836
3837         /* if default_reset_request has a higher level reset request,
3838          * it should be handled as soon as possible. since some errors
3839          * need this kind of reset to fix.
3840          */
3841         reset_level = hclge_get_reset_level(ae_dev,
3842                                             &hdev->default_reset_request);
3843         if (reset_level != HNAE3_NONE_RESET)
3844                 set_bit(reset_level, &hdev->reset_request);
3845
3846         return 0;
3847 }
3848
3849 static void hclge_reset(struct hclge_dev *hdev)
3850 {
3851         if (hclge_reset_prepare(hdev))
3852                 goto err_reset;
3853
3854         if (hclge_reset_wait(hdev))
3855                 goto err_reset;
3856
3857         if (hclge_reset_rebuild(hdev))
3858                 goto err_reset;
3859
3860         return;
3861
3862 err_reset:
3863         if (hclge_reset_err_handle(hdev))
3864                 hclge_reset_task_schedule(hdev);
3865 }
3866
3867 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3868 {
3869         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3870         struct hclge_dev *hdev = ae_dev->priv;
3871
3872         /* We might end up getting called broadly because of 2 below cases:
3873          * 1. Recoverable error was conveyed through APEI and only way to bring
3874          *    normalcy is to reset.
3875          * 2. A new reset request from the stack due to timeout
3876          *
3877          * For the first case,error event might not have ae handle available.
3878          * check if this is a new reset request and we are not here just because
3879          * last reset attempt did not succeed and watchdog hit us again. We will
3880          * know this if last reset request did not occur very recently (watchdog
3881          * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
3882          * In case of new request we reset the "reset level" to PF reset.
3883          * And if it is a repeat reset request of the most recent one then we
3884          * want to make sure we throttle the reset request. Therefore, we will
3885          * not allow it again before 3*HZ times.
3886          */
3887         if (!handle)
3888                 handle = &hdev->vport[0].nic;
3889
3890         if (time_before(jiffies, (hdev->last_reset_time +
3891                                   HCLGE_RESET_INTERVAL))) {
3892                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3893                 return;
3894         } else if (hdev->default_reset_request) {
3895                 hdev->reset_level =
3896                         hclge_get_reset_level(ae_dev,
3897                                               &hdev->default_reset_request);
3898         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3899                 hdev->reset_level = HNAE3_FUNC_RESET;
3900         }
3901
3902         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3903                  hdev->reset_level);
3904
3905         /* request reset & schedule reset task */
3906         set_bit(hdev->reset_level, &hdev->reset_request);
3907         hclge_reset_task_schedule(hdev);
3908
3909         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3910                 hdev->reset_level++;
3911 }
3912
3913 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3914                                         enum hnae3_reset_type rst_type)
3915 {
3916         struct hclge_dev *hdev = ae_dev->priv;
3917
3918         set_bit(rst_type, &hdev->default_reset_request);
3919 }
3920
3921 static void hclge_reset_timer(struct timer_list *t)
3922 {
3923         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3924
3925         /* if default_reset_request has no value, it means that this reset
3926          * request has already be handled, so just return here
3927          */
3928         if (!hdev->default_reset_request)
3929                 return;
3930
3931         dev_info(&hdev->pdev->dev,
3932                  "triggering reset in reset timer\n");
3933         hclge_reset_event(hdev->pdev, NULL);
3934 }
3935
3936 static void hclge_reset_subtask(struct hclge_dev *hdev)
3937 {
3938         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3939
3940         /* check if there is any ongoing reset in the hardware. This status can
3941          * be checked from reset_pending. If there is then, we need to wait for
3942          * hardware to complete reset.
3943          *    a. If we are able to figure out in reasonable time that hardware
3944          *       has fully resetted then, we can proceed with driver, client
3945          *       reset.
3946          *    b. else, we can come back later to check this status so re-sched
3947          *       now.
3948          */
3949         hdev->last_reset_time = jiffies;
3950         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3951         if (hdev->reset_type != HNAE3_NONE_RESET)
3952                 hclge_reset(hdev);
3953
3954         /* check if we got any *new* reset requests to be honored */
3955         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3956         if (hdev->reset_type != HNAE3_NONE_RESET)
3957                 hclge_do_reset(hdev);
3958
3959         hdev->reset_type = HNAE3_NONE_RESET;
3960 }
3961
3962 static void hclge_reset_service_task(struct hclge_dev *hdev)
3963 {
3964         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3965                 return;
3966
3967         down(&hdev->reset_sem);
3968         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3969
3970         hclge_reset_subtask(hdev);
3971
3972         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3973         up(&hdev->reset_sem);
3974 }
3975
3976 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3977 {
3978         int i;
3979
3980         /* start from vport 1 for PF is always alive */
3981         for (i = 1; i < hdev->num_alloc_vport; i++) {
3982                 struct hclge_vport *vport = &hdev->vport[i];
3983
3984                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3985                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3986
3987                 /* If vf is not alive, set to default value */
3988                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3989                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3990         }
3991 }
3992
3993 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3994 {
3995         unsigned long delta = round_jiffies_relative(HZ);
3996
3997         /* Always handle the link updating to make sure link state is
3998          * updated when it is triggered by mbx.
3999          */
4000         hclge_update_link_status(hdev);
4001
4002         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4003                 delta = jiffies - hdev->last_serv_processed;
4004
4005                 if (delta < round_jiffies_relative(HZ)) {
4006                         delta = round_jiffies_relative(HZ) - delta;
4007                         goto out;
4008                 }
4009         }
4010
4011         hdev->serv_processed_cnt++;
4012         hclge_update_vport_alive(hdev);
4013
4014         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4015                 hdev->last_serv_processed = jiffies;
4016                 goto out;
4017         }
4018
4019         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4020                 hclge_update_stats_for_all(hdev);
4021
4022         hclge_update_port_info(hdev);
4023         hclge_sync_vlan_filter(hdev);
4024
4025         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4026                 hclge_rfs_filter_expire(hdev);
4027
4028         hdev->last_serv_processed = jiffies;
4029
4030 out:
4031         hclge_task_schedule(hdev, delta);
4032 }
4033
4034 static void hclge_service_task(struct work_struct *work)
4035 {
4036         struct hclge_dev *hdev =
4037                 container_of(work, struct hclge_dev, service_task.work);
4038
4039         hclge_reset_service_task(hdev);
4040         hclge_mailbox_service_task(hdev);
4041         hclge_periodic_service_task(hdev);
4042
4043         /* Handle reset and mbx again in case periodical task delays the
4044          * handling by calling hclge_task_schedule() in
4045          * hclge_periodic_service_task().
4046          */
4047         hclge_reset_service_task(hdev);
4048         hclge_mailbox_service_task(hdev);
4049 }
4050
4051 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4052 {
4053         /* VF handle has no client */
4054         if (!handle->client)
4055                 return container_of(handle, struct hclge_vport, nic);
4056         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4057                 return container_of(handle, struct hclge_vport, roce);
4058         else
4059                 return container_of(handle, struct hclge_vport, nic);
4060 }
4061
4062 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4063                             struct hnae3_vector_info *vector_info)
4064 {
4065         struct hclge_vport *vport = hclge_get_vport(handle);
4066         struct hnae3_vector_info *vector = vector_info;
4067         struct hclge_dev *hdev = vport->back;
4068         int alloc = 0;
4069         int i, j;
4070
4071         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4072         vector_num = min(hdev->num_msi_left, vector_num);
4073
4074         for (j = 0; j < vector_num; j++) {
4075                 for (i = 1; i < hdev->num_msi; i++) {
4076                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4077                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4078                                 vector->io_addr = hdev->hw.io_base +
4079                                         HCLGE_VECTOR_REG_BASE +
4080                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4081                                         vport->vport_id *
4082                                         HCLGE_VECTOR_VF_OFFSET;
4083                                 hdev->vector_status[i] = vport->vport_id;
4084                                 hdev->vector_irq[i] = vector->vector;
4085
4086                                 vector++;
4087                                 alloc++;
4088
4089                                 break;
4090                         }
4091                 }
4092         }
4093         hdev->num_msi_left -= alloc;
4094         hdev->num_msi_used += alloc;
4095
4096         return alloc;
4097 }
4098
4099 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4100 {
4101         int i;
4102
4103         for (i = 0; i < hdev->num_msi; i++)
4104                 if (vector == hdev->vector_irq[i])
4105                         return i;
4106
4107         return -EINVAL;
4108 }
4109
4110 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4111 {
4112         struct hclge_vport *vport = hclge_get_vport(handle);
4113         struct hclge_dev *hdev = vport->back;
4114         int vector_id;
4115
4116         vector_id = hclge_get_vector_index(hdev, vector);
4117         if (vector_id < 0) {
4118                 dev_err(&hdev->pdev->dev,
4119                         "Get vector index fail. vector_id =%d\n", vector_id);
4120                 return vector_id;
4121         }
4122
4123         hclge_free_vector(hdev, vector_id);
4124
4125         return 0;
4126 }
4127
4128 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4129 {
4130         return HCLGE_RSS_KEY_SIZE;
4131 }
4132
4133 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4134 {
4135         return HCLGE_RSS_IND_TBL_SIZE;
4136 }
4137
4138 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4139                                   const u8 hfunc, const u8 *key)
4140 {
4141         struct hclge_rss_config_cmd *req;
4142         unsigned int key_offset = 0;
4143         struct hclge_desc desc;
4144         int key_counts;
4145         int key_size;
4146         int ret;
4147
4148         key_counts = HCLGE_RSS_KEY_SIZE;
4149         req = (struct hclge_rss_config_cmd *)desc.data;
4150
4151         while (key_counts) {
4152                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4153                                            false);
4154
4155                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4156                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4157
4158                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4159                 memcpy(req->hash_key,
4160                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4161
4162                 key_counts -= key_size;
4163                 key_offset++;
4164                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4165                 if (ret) {
4166                         dev_err(&hdev->pdev->dev,
4167                                 "Configure RSS config fail, status = %d\n",
4168                                 ret);
4169                         return ret;
4170                 }
4171         }
4172         return 0;
4173 }
4174
4175 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4176 {
4177         struct hclge_rss_indirection_table_cmd *req;
4178         struct hclge_desc desc;
4179         int i, j;
4180         int ret;
4181
4182         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4183
4184         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4185                 hclge_cmd_setup_basic_desc
4186                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4187
4188                 req->start_table_index =
4189                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4190                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4191
4192                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4193                         req->rss_result[j] =
4194                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4195
4196                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4197                 if (ret) {
4198                         dev_err(&hdev->pdev->dev,
4199                                 "Configure rss indir table fail,status = %d\n",
4200                                 ret);
4201                         return ret;
4202                 }
4203         }
4204         return 0;
4205 }
4206
4207 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4208                                  u16 *tc_size, u16 *tc_offset)
4209 {
4210         struct hclge_rss_tc_mode_cmd *req;
4211         struct hclge_desc desc;
4212         int ret;
4213         int i;
4214
4215         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4216         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4217
4218         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4219                 u16 mode = 0;
4220
4221                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4222                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4223                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4224                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4225                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4226
4227                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4228         }
4229
4230         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4231         if (ret)
4232                 dev_err(&hdev->pdev->dev,
4233                         "Configure rss tc mode fail, status = %d\n", ret);
4234
4235         return ret;
4236 }
4237
4238 static void hclge_get_rss_type(struct hclge_vport *vport)
4239 {
4240         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4241             vport->rss_tuple_sets.ipv4_udp_en ||
4242             vport->rss_tuple_sets.ipv4_sctp_en ||
4243             vport->rss_tuple_sets.ipv6_tcp_en ||
4244             vport->rss_tuple_sets.ipv6_udp_en ||
4245             vport->rss_tuple_sets.ipv6_sctp_en)
4246                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4247         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4248                  vport->rss_tuple_sets.ipv6_fragment_en)
4249                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4250         else
4251                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4252 }
4253
4254 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4255 {
4256         struct hclge_rss_input_tuple_cmd *req;
4257         struct hclge_desc desc;
4258         int ret;
4259
4260         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4261
4262         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4263
4264         /* Get the tuple cfg from pf */
4265         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4266         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4267         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4268         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4269         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4270         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4271         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4272         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4273         hclge_get_rss_type(&hdev->vport[0]);
4274         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4275         if (ret)
4276                 dev_err(&hdev->pdev->dev,
4277                         "Configure rss input fail, status = %d\n", ret);
4278         return ret;
4279 }
4280
4281 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4282                          u8 *key, u8 *hfunc)
4283 {
4284         struct hclge_vport *vport = hclge_get_vport(handle);
4285         int i;
4286
4287         /* Get hash algorithm */
4288         if (hfunc) {
4289                 switch (vport->rss_algo) {
4290                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4291                         *hfunc = ETH_RSS_HASH_TOP;
4292                         break;
4293                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4294                         *hfunc = ETH_RSS_HASH_XOR;
4295                         break;
4296                 default:
4297                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4298                         break;
4299                 }
4300         }
4301
4302         /* Get the RSS Key required by the user */
4303         if (key)
4304                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4305
4306         /* Get indirect table */
4307         if (indir)
4308                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4309                         indir[i] =  vport->rss_indirection_tbl[i];
4310
4311         return 0;
4312 }
4313
4314 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4315                          const  u8 *key, const  u8 hfunc)
4316 {
4317         struct hclge_vport *vport = hclge_get_vport(handle);
4318         struct hclge_dev *hdev = vport->back;
4319         u8 hash_algo;
4320         int ret, i;
4321
4322         /* Set the RSS Hash Key if specififed by the user */
4323         if (key) {
4324                 switch (hfunc) {
4325                 case ETH_RSS_HASH_TOP:
4326                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4327                         break;
4328                 case ETH_RSS_HASH_XOR:
4329                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4330                         break;
4331                 case ETH_RSS_HASH_NO_CHANGE:
4332                         hash_algo = vport->rss_algo;
4333                         break;
4334                 default:
4335                         return -EINVAL;
4336                 }
4337
4338                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4339                 if (ret)
4340                         return ret;
4341
4342                 /* Update the shadow RSS key with user specified qids */
4343                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4344                 vport->rss_algo = hash_algo;
4345         }
4346
4347         /* Update the shadow RSS table with user specified qids */
4348         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4349                 vport->rss_indirection_tbl[i] = indir[i];
4350
4351         /* Update the hardware */
4352         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4353 }
4354
4355 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4356 {
4357         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4358
4359         if (nfc->data & RXH_L4_B_2_3)
4360                 hash_sets |= HCLGE_D_PORT_BIT;
4361         else
4362                 hash_sets &= ~HCLGE_D_PORT_BIT;
4363
4364         if (nfc->data & RXH_IP_SRC)
4365                 hash_sets |= HCLGE_S_IP_BIT;
4366         else
4367                 hash_sets &= ~HCLGE_S_IP_BIT;
4368
4369         if (nfc->data & RXH_IP_DST)
4370                 hash_sets |= HCLGE_D_IP_BIT;
4371         else
4372                 hash_sets &= ~HCLGE_D_IP_BIT;
4373
4374         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4375                 hash_sets |= HCLGE_V_TAG_BIT;
4376
4377         return hash_sets;
4378 }
4379
4380 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4381                                struct ethtool_rxnfc *nfc)
4382 {
4383         struct hclge_vport *vport = hclge_get_vport(handle);
4384         struct hclge_dev *hdev = vport->back;
4385         struct hclge_rss_input_tuple_cmd *req;
4386         struct hclge_desc desc;
4387         u8 tuple_sets;
4388         int ret;
4389
4390         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4391                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4392                 return -EINVAL;
4393
4394         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4395         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4396
4397         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4398         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4399         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4400         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4401         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4402         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4403         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4404         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4405
4406         tuple_sets = hclge_get_rss_hash_bits(nfc);
4407         switch (nfc->flow_type) {
4408         case TCP_V4_FLOW:
4409                 req->ipv4_tcp_en = tuple_sets;
4410                 break;
4411         case TCP_V6_FLOW:
4412                 req->ipv6_tcp_en = tuple_sets;
4413                 break;
4414         case UDP_V4_FLOW:
4415                 req->ipv4_udp_en = tuple_sets;
4416                 break;
4417         case UDP_V6_FLOW:
4418                 req->ipv6_udp_en = tuple_sets;
4419                 break;
4420         case SCTP_V4_FLOW:
4421                 req->ipv4_sctp_en = tuple_sets;
4422                 break;
4423         case SCTP_V6_FLOW:
4424                 if ((nfc->data & RXH_L4_B_0_1) ||
4425                     (nfc->data & RXH_L4_B_2_3))
4426                         return -EINVAL;
4427
4428                 req->ipv6_sctp_en = tuple_sets;
4429                 break;
4430         case IPV4_FLOW:
4431                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4432                 break;
4433         case IPV6_FLOW:
4434                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4435                 break;
4436         default:
4437                 return -EINVAL;
4438         }
4439
4440         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4441         if (ret) {
4442                 dev_err(&hdev->pdev->dev,
4443                         "Set rss tuple fail, status = %d\n", ret);
4444                 return ret;
4445         }
4446
4447         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4448         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4449         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4450         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4451         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4452         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4453         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4454         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4455         hclge_get_rss_type(vport);
4456         return 0;
4457 }
4458
4459 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4460                                struct ethtool_rxnfc *nfc)
4461 {
4462         struct hclge_vport *vport = hclge_get_vport(handle);
4463         u8 tuple_sets;
4464
4465         nfc->data = 0;
4466
4467         switch (nfc->flow_type) {
4468         case TCP_V4_FLOW:
4469                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4470                 break;
4471         case UDP_V4_FLOW:
4472                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4473                 break;
4474         case TCP_V6_FLOW:
4475                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4476                 break;
4477         case UDP_V6_FLOW:
4478                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4479                 break;
4480         case SCTP_V4_FLOW:
4481                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4482                 break;
4483         case SCTP_V6_FLOW:
4484                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4485                 break;
4486         case IPV4_FLOW:
4487         case IPV6_FLOW:
4488                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4489                 break;
4490         default:
4491                 return -EINVAL;
4492         }
4493
4494         if (!tuple_sets)
4495                 return 0;
4496
4497         if (tuple_sets & HCLGE_D_PORT_BIT)
4498                 nfc->data |= RXH_L4_B_2_3;
4499         if (tuple_sets & HCLGE_S_PORT_BIT)
4500                 nfc->data |= RXH_L4_B_0_1;
4501         if (tuple_sets & HCLGE_D_IP_BIT)
4502                 nfc->data |= RXH_IP_DST;
4503         if (tuple_sets & HCLGE_S_IP_BIT)
4504                 nfc->data |= RXH_IP_SRC;
4505
4506         return 0;
4507 }
4508
4509 static int hclge_get_tc_size(struct hnae3_handle *handle)
4510 {
4511         struct hclge_vport *vport = hclge_get_vport(handle);
4512         struct hclge_dev *hdev = vport->back;
4513
4514         return hdev->rss_size_max;
4515 }
4516
4517 int hclge_rss_init_hw(struct hclge_dev *hdev)
4518 {
4519         struct hclge_vport *vport = hdev->vport;
4520         u8 *rss_indir = vport[0].rss_indirection_tbl;
4521         u16 rss_size = vport[0].alloc_rss_size;
4522         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4523         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4524         u8 *key = vport[0].rss_hash_key;
4525         u8 hfunc = vport[0].rss_algo;
4526         u16 tc_valid[HCLGE_MAX_TC_NUM];
4527         u16 roundup_size;
4528         unsigned int i;
4529         int ret;
4530
4531         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4532         if (ret)
4533                 return ret;
4534
4535         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4536         if (ret)
4537                 return ret;
4538
4539         ret = hclge_set_rss_input_tuple(hdev);
4540         if (ret)
4541                 return ret;
4542
4543         /* Each TC have the same queue size, and tc_size set to hardware is
4544          * the log2 of roundup power of two of rss_size, the acutal queue
4545          * size is limited by indirection table.
4546          */
4547         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4548                 dev_err(&hdev->pdev->dev,
4549                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4550                         rss_size);
4551                 return -EINVAL;
4552         }
4553
4554         roundup_size = roundup_pow_of_two(rss_size);
4555         roundup_size = ilog2(roundup_size);
4556
4557         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4558                 tc_valid[i] = 0;
4559
4560                 if (!(hdev->hw_tc_map & BIT(i)))
4561                         continue;
4562
4563                 tc_valid[i] = 1;
4564                 tc_size[i] = roundup_size;
4565                 tc_offset[i] = rss_size * i;
4566         }
4567
4568         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4569 }
4570
4571 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4572 {
4573         struct hclge_vport *vport = hdev->vport;
4574         int i, j;
4575
4576         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4577                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4578                         vport[j].rss_indirection_tbl[i] =
4579                                 i % vport[j].alloc_rss_size;
4580         }
4581 }
4582
4583 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4584 {
4585         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4586         struct hclge_vport *vport = hdev->vport;
4587
4588         if (hdev->pdev->revision >= 0x21)
4589                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4590
4591         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4592                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4593                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4594                 vport[i].rss_tuple_sets.ipv4_udp_en =
4595                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4596                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4597                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4598                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4599                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4600                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4601                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4602                 vport[i].rss_tuple_sets.ipv6_udp_en =
4603                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4604                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4605                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4606                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4607                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4608
4609                 vport[i].rss_algo = rss_algo;
4610
4611                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4612                        HCLGE_RSS_KEY_SIZE);
4613         }
4614
4615         hclge_rss_indir_init_cfg(hdev);
4616 }
4617
4618 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4619                                 int vector_id, bool en,
4620                                 struct hnae3_ring_chain_node *ring_chain)
4621 {
4622         struct hclge_dev *hdev = vport->back;
4623         struct hnae3_ring_chain_node *node;
4624         struct hclge_desc desc;
4625         struct hclge_ctrl_vector_chain_cmd *req =
4626                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4627         enum hclge_cmd_status status;
4628         enum hclge_opcode_type op;
4629         u16 tqp_type_and_id;
4630         int i;
4631
4632         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4633         hclge_cmd_setup_basic_desc(&desc, op, false);
4634         req->int_vector_id = vector_id;
4635
4636         i = 0;
4637         for (node = ring_chain; node; node = node->next) {
4638                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4639                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4640                                 HCLGE_INT_TYPE_S,
4641                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4642                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4643                                 HCLGE_TQP_ID_S, node->tqp_index);
4644                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4645                                 HCLGE_INT_GL_IDX_S,
4646                                 hnae3_get_field(node->int_gl_idx,
4647                                                 HNAE3_RING_GL_IDX_M,
4648                                                 HNAE3_RING_GL_IDX_S));
4649                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4650                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4651                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4652                         req->vfid = vport->vport_id;
4653
4654                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4655                         if (status) {
4656                                 dev_err(&hdev->pdev->dev,
4657                                         "Map TQP fail, status is %d.\n",
4658                                         status);
4659                                 return -EIO;
4660                         }
4661                         i = 0;
4662
4663                         hclge_cmd_setup_basic_desc(&desc,
4664                                                    op,
4665                                                    false);
4666                         req->int_vector_id = vector_id;
4667                 }
4668         }
4669
4670         if (i > 0) {
4671                 req->int_cause_num = i;
4672                 req->vfid = vport->vport_id;
4673                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4674                 if (status) {
4675                         dev_err(&hdev->pdev->dev,
4676                                 "Map TQP fail, status is %d.\n", status);
4677                         return -EIO;
4678                 }
4679         }
4680
4681         return 0;
4682 }
4683
4684 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4685                                     struct hnae3_ring_chain_node *ring_chain)
4686 {
4687         struct hclge_vport *vport = hclge_get_vport(handle);
4688         struct hclge_dev *hdev = vport->back;
4689         int vector_id;
4690
4691         vector_id = hclge_get_vector_index(hdev, vector);
4692         if (vector_id < 0) {
4693                 dev_err(&hdev->pdev->dev,
4694                         "failed to get vector index. vector=%d\n", vector);
4695                 return vector_id;
4696         }
4697
4698         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4699 }
4700
4701 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4702                                        struct hnae3_ring_chain_node *ring_chain)
4703 {
4704         struct hclge_vport *vport = hclge_get_vport(handle);
4705         struct hclge_dev *hdev = vport->back;
4706         int vector_id, ret;
4707
4708         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4709                 return 0;
4710
4711         vector_id = hclge_get_vector_index(hdev, vector);
4712         if (vector_id < 0) {
4713                 dev_err(&handle->pdev->dev,
4714                         "Get vector index fail. ret =%d\n", vector_id);
4715                 return vector_id;
4716         }
4717
4718         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4719         if (ret)
4720                 dev_err(&handle->pdev->dev,
4721                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4722                         vector_id, ret);
4723
4724         return ret;
4725 }
4726
4727 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4728                                       struct hclge_promisc_param *param)
4729 {
4730         struct hclge_promisc_cfg_cmd *req;
4731         struct hclge_desc desc;
4732         int ret;
4733
4734         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4735
4736         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4737         req->vf_id = param->vf_id;
4738
4739         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4740          * pdev revision(0x20), new revision support them. The
4741          * value of this two fields will not return error when driver
4742          * send command to fireware in revision(0x20).
4743          */
4744         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4745                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4746
4747         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4748         if (ret)
4749                 dev_err(&hdev->pdev->dev,
4750                         "Set promisc mode fail, status is %d.\n", ret);
4751
4752         return ret;
4753 }
4754
4755 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4756                                      bool en_uc, bool en_mc, bool en_bc,
4757                                      int vport_id)
4758 {
4759         if (!param)
4760                 return;
4761
4762         memset(param, 0, sizeof(struct hclge_promisc_param));
4763         if (en_uc)
4764                 param->enable = HCLGE_PROMISC_EN_UC;
4765         if (en_mc)
4766                 param->enable |= HCLGE_PROMISC_EN_MC;
4767         if (en_bc)
4768                 param->enable |= HCLGE_PROMISC_EN_BC;
4769         param->vf_id = vport_id;
4770 }
4771
4772 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4773                                  bool en_mc_pmc, bool en_bc_pmc)
4774 {
4775         struct hclge_dev *hdev = vport->back;
4776         struct hclge_promisc_param param;
4777
4778         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4779                                  vport->vport_id);
4780         return hclge_cmd_set_promisc_mode(hdev, &param);
4781 }
4782
4783 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4784                                   bool en_mc_pmc)
4785 {
4786         struct hclge_vport *vport = hclge_get_vport(handle);
4787         bool en_bc_pmc = true;
4788
4789         /* For revision 0x20, if broadcast promisc enabled, vlan filter is
4790          * always bypassed. So broadcast promisc should be disabled until
4791          * user enable promisc mode
4792          */
4793         if (handle->pdev->revision == 0x20)
4794                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4795
4796         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4797                                             en_bc_pmc);
4798 }
4799
4800 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4801 {
4802         struct hclge_get_fd_mode_cmd *req;
4803         struct hclge_desc desc;
4804         int ret;
4805
4806         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4807
4808         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4809
4810         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4811         if (ret) {
4812                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4813                 return ret;
4814         }
4815
4816         *fd_mode = req->mode;
4817
4818         return ret;
4819 }
4820
4821 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4822                                    u32 *stage1_entry_num,
4823                                    u32 *stage2_entry_num,
4824                                    u16 *stage1_counter_num,
4825                                    u16 *stage2_counter_num)
4826 {
4827         struct hclge_get_fd_allocation_cmd *req;
4828         struct hclge_desc desc;
4829         int ret;
4830
4831         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4832
4833         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4834
4835         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4836         if (ret) {
4837                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4838                         ret);
4839                 return ret;
4840         }
4841
4842         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4843         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4844         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4845         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4846
4847         return ret;
4848 }
4849
4850 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4851 {
4852         struct hclge_set_fd_key_config_cmd *req;
4853         struct hclge_fd_key_cfg *stage;
4854         struct hclge_desc desc;
4855         int ret;
4856
4857         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4858
4859         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4860         stage = &hdev->fd_cfg.key_cfg[stage_num];
4861         req->stage = stage_num;
4862         req->key_select = stage->key_sel;
4863         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4864         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4865         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4866         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4867         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4868         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4869
4870         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4871         if (ret)
4872                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4873
4874         return ret;
4875 }
4876
4877 static int hclge_init_fd_config(struct hclge_dev *hdev)
4878 {
4879 #define LOW_2_WORDS             0x03
4880         struct hclge_fd_key_cfg *key_cfg;
4881         int ret;
4882
4883         if (!hnae3_dev_fd_supported(hdev))
4884                 return 0;
4885
4886         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4887         if (ret)
4888                 return ret;
4889
4890         switch (hdev->fd_cfg.fd_mode) {
4891         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4892                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4893                 break;
4894         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4895                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4896                 break;
4897         default:
4898                 dev_err(&hdev->pdev->dev,
4899                         "Unsupported flow director mode %u\n",
4900                         hdev->fd_cfg.fd_mode);
4901                 return -EOPNOTSUPP;
4902         }
4903
4904         hdev->fd_cfg.proto_support =
4905                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4906                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4907         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4908         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4909         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4910         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4911         key_cfg->outer_sipv6_word_en = 0;
4912         key_cfg->outer_dipv6_word_en = 0;
4913
4914         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4915                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4916                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4917                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4918
4919         /* If use max 400bit key, we can support tuples for ether type */
4920         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4921                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4922                 key_cfg->tuple_active |=
4923                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4924         }
4925
4926         /* roce_type is used to filter roce frames
4927          * dst_vport is used to specify the rule
4928          */
4929         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4930
4931         ret = hclge_get_fd_allocation(hdev,
4932                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4933                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4934                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4935                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4936         if (ret)
4937                 return ret;
4938
4939         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4940 }
4941
4942 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4943                                 int loc, u8 *key, bool is_add)
4944 {
4945         struct hclge_fd_tcam_config_1_cmd *req1;
4946         struct hclge_fd_tcam_config_2_cmd *req2;
4947         struct hclge_fd_tcam_config_3_cmd *req3;
4948         struct hclge_desc desc[3];
4949         int ret;
4950
4951         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4952         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4953         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4954         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4955         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4956
4957         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4958         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4959         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4960
4961         req1->stage = stage;
4962         req1->xy_sel = sel_x ? 1 : 0;
4963         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4964         req1->index = cpu_to_le32(loc);
4965         req1->entry_vld = sel_x ? is_add : 0;
4966
4967         if (key) {
4968                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4969                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4970                        sizeof(req2->tcam_data));
4971                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4972                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4973         }
4974
4975         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4976         if (ret)
4977                 dev_err(&hdev->pdev->dev,
4978                         "config tcam key fail, ret=%d\n",
4979                         ret);
4980
4981         return ret;
4982 }
4983
4984 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4985                               struct hclge_fd_ad_data *action)
4986 {
4987         struct hclge_fd_ad_config_cmd *req;
4988         struct hclge_desc desc;
4989         u64 ad_data = 0;
4990         int ret;
4991
4992         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4993
4994         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4995         req->index = cpu_to_le32(loc);
4996         req->stage = stage;
4997
4998         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4999                       action->write_rule_id_to_bd);
5000         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5001                         action->rule_id);
5002         ad_data <<= 32;
5003         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5004         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5005                       action->forward_to_direct_queue);
5006         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5007                         action->queue_id);
5008         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5009         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5010                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5011         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5012         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5013                         action->counter_id);
5014
5015         req->ad_data = cpu_to_le64(ad_data);
5016         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5017         if (ret)
5018                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5019
5020         return ret;
5021 }
5022
5023 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5024                                    struct hclge_fd_rule *rule)
5025 {
5026         u16 tmp_x_s, tmp_y_s;
5027         u32 tmp_x_l, tmp_y_l;
5028         int i;
5029
5030         if (rule->unused_tuple & tuple_bit)
5031                 return true;
5032
5033         switch (tuple_bit) {
5034         case 0:
5035                 return false;
5036         case BIT(INNER_DST_MAC):
5037                 for (i = 0; i < ETH_ALEN; i++) {
5038                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5039                                rule->tuples_mask.dst_mac[i]);
5040                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5041                                rule->tuples_mask.dst_mac[i]);
5042                 }
5043
5044                 return true;
5045         case BIT(INNER_SRC_MAC):
5046                 for (i = 0; i < ETH_ALEN; i++) {
5047                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5048                                rule->tuples.src_mac[i]);
5049                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5050                                rule->tuples.src_mac[i]);
5051                 }
5052
5053                 return true;
5054         case BIT(INNER_VLAN_TAG_FST):
5055                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5056                        rule->tuples_mask.vlan_tag1);
5057                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5058                        rule->tuples_mask.vlan_tag1);
5059                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5060                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5061
5062                 return true;
5063         case BIT(INNER_ETH_TYPE):
5064                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5065                        rule->tuples_mask.ether_proto);
5066                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5067                        rule->tuples_mask.ether_proto);
5068                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5069                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5070
5071                 return true;
5072         case BIT(INNER_IP_TOS):
5073                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5074                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5075
5076                 return true;
5077         case BIT(INNER_IP_PROTO):
5078                 calc_x(*key_x, rule->tuples.ip_proto,
5079                        rule->tuples_mask.ip_proto);
5080                 calc_y(*key_y, rule->tuples.ip_proto,
5081                        rule->tuples_mask.ip_proto);
5082
5083                 return true;
5084         case BIT(INNER_SRC_IP):
5085                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5086                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5087                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5088                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5089                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5090                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5091
5092                 return true;
5093         case BIT(INNER_DST_IP):
5094                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5095                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5096                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5097                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5098                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5099                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5100
5101                 return true;
5102         case BIT(INNER_SRC_PORT):
5103                 calc_x(tmp_x_s, rule->tuples.src_port,
5104                        rule->tuples_mask.src_port);
5105                 calc_y(tmp_y_s, rule->tuples.src_port,
5106                        rule->tuples_mask.src_port);
5107                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5108                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5109
5110                 return true;
5111         case BIT(INNER_DST_PORT):
5112                 calc_x(tmp_x_s, rule->tuples.dst_port,
5113                        rule->tuples_mask.dst_port);
5114                 calc_y(tmp_y_s, rule->tuples.dst_port,
5115                        rule->tuples_mask.dst_port);
5116                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5117                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5118
5119                 return true;
5120         default:
5121                 return false;
5122         }
5123 }
5124
5125 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5126                                  u8 vf_id, u8 network_port_id)
5127 {
5128         u32 port_number = 0;
5129
5130         if (port_type == HOST_PORT) {
5131                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5132                                 pf_id);
5133                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5134                                 vf_id);
5135                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5136         } else {
5137                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5138                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5139                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5140         }
5141
5142         return port_number;
5143 }
5144
5145 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5146                                        __le32 *key_x, __le32 *key_y,
5147                                        struct hclge_fd_rule *rule)
5148 {
5149         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5150         u8 cur_pos = 0, tuple_size, shift_bits;
5151         unsigned int i;
5152
5153         for (i = 0; i < MAX_META_DATA; i++) {
5154                 tuple_size = meta_data_key_info[i].key_length;
5155                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5156
5157                 switch (tuple_bit) {
5158                 case BIT(ROCE_TYPE):
5159                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5160                         cur_pos += tuple_size;
5161                         break;
5162                 case BIT(DST_VPORT):
5163                         port_number = hclge_get_port_number(HOST_PORT, 0,
5164                                                             rule->vf_id, 0);
5165                         hnae3_set_field(meta_data,
5166                                         GENMASK(cur_pos + tuple_size, cur_pos),
5167                                         cur_pos, port_number);
5168                         cur_pos += tuple_size;
5169                         break;
5170                 default:
5171                         break;
5172                 }
5173         }
5174
5175         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5176         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5177         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5178
5179         *key_x = cpu_to_le32(tmp_x << shift_bits);
5180         *key_y = cpu_to_le32(tmp_y << shift_bits);
5181 }
5182
5183 /* A complete key is combined with meta data key and tuple key.
5184  * Meta data key is stored at the MSB region, and tuple key is stored at
5185  * the LSB region, unused bits will be filled 0.
5186  */
5187 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5188                             struct hclge_fd_rule *rule)
5189 {
5190         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5191         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5192         u8 *cur_key_x, *cur_key_y;
5193         unsigned int i;
5194         int ret, tuple_size;
5195         u8 meta_data_region;
5196
5197         memset(key_x, 0, sizeof(key_x));
5198         memset(key_y, 0, sizeof(key_y));
5199         cur_key_x = key_x;
5200         cur_key_y = key_y;
5201
5202         for (i = 0 ; i < MAX_TUPLE; i++) {
5203                 bool tuple_valid;
5204                 u32 check_tuple;
5205
5206                 tuple_size = tuple_key_info[i].key_length / 8;
5207                 check_tuple = key_cfg->tuple_active & BIT(i);
5208
5209                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5210                                                      cur_key_y, rule);
5211                 if (tuple_valid) {
5212                         cur_key_x += tuple_size;
5213                         cur_key_y += tuple_size;
5214                 }
5215         }
5216
5217         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5218                         MAX_META_DATA_LENGTH / 8;
5219
5220         hclge_fd_convert_meta_data(key_cfg,
5221                                    (__le32 *)(key_x + meta_data_region),
5222                                    (__le32 *)(key_y + meta_data_region),
5223                                    rule);
5224
5225         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5226                                    true);
5227         if (ret) {
5228                 dev_err(&hdev->pdev->dev,
5229                         "fd key_y config fail, loc=%u, ret=%d\n",
5230                         rule->queue_id, ret);
5231                 return ret;
5232         }
5233
5234         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5235                                    true);
5236         if (ret)
5237                 dev_err(&hdev->pdev->dev,
5238                         "fd key_x config fail, loc=%u, ret=%d\n",
5239                         rule->queue_id, ret);
5240         return ret;
5241 }
5242
5243 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5244                                struct hclge_fd_rule *rule)
5245 {
5246         struct hclge_fd_ad_data ad_data;
5247
5248         ad_data.ad_id = rule->location;
5249
5250         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5251                 ad_data.drop_packet = true;
5252                 ad_data.forward_to_direct_queue = false;
5253                 ad_data.queue_id = 0;
5254         } else {
5255                 ad_data.drop_packet = false;
5256                 ad_data.forward_to_direct_queue = true;
5257                 ad_data.queue_id = rule->queue_id;
5258         }
5259
5260         ad_data.use_counter = false;
5261         ad_data.counter_id = 0;
5262
5263         ad_data.use_next_stage = false;
5264         ad_data.next_input_key = 0;
5265
5266         ad_data.write_rule_id_to_bd = true;
5267         ad_data.rule_id = rule->location;
5268
5269         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5270 }
5271
5272 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5273                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5274 {
5275         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5276         struct ethtool_usrip4_spec *usr_ip4_spec;
5277         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5278         struct ethtool_usrip6_spec *usr_ip6_spec;
5279         struct ethhdr *ether_spec;
5280
5281         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5282                 return -EINVAL;
5283
5284         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5285                 return -EOPNOTSUPP;
5286
5287         if ((fs->flow_type & FLOW_EXT) &&
5288             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5289                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5290                 return -EOPNOTSUPP;
5291         }
5292
5293         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5294         case SCTP_V4_FLOW:
5295         case TCP_V4_FLOW:
5296         case UDP_V4_FLOW:
5297                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5298                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5299
5300                 if (!tcp_ip4_spec->ip4src)
5301                         *unused |= BIT(INNER_SRC_IP);
5302
5303                 if (!tcp_ip4_spec->ip4dst)
5304                         *unused |= BIT(INNER_DST_IP);
5305
5306                 if (!tcp_ip4_spec->psrc)
5307                         *unused |= BIT(INNER_SRC_PORT);
5308
5309                 if (!tcp_ip4_spec->pdst)
5310                         *unused |= BIT(INNER_DST_PORT);
5311
5312                 if (!tcp_ip4_spec->tos)
5313                         *unused |= BIT(INNER_IP_TOS);
5314
5315                 break;
5316         case IP_USER_FLOW:
5317                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5318                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5319                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5320
5321                 if (!usr_ip4_spec->ip4src)
5322                         *unused |= BIT(INNER_SRC_IP);
5323
5324                 if (!usr_ip4_spec->ip4dst)
5325                         *unused |= BIT(INNER_DST_IP);
5326
5327                 if (!usr_ip4_spec->tos)
5328                         *unused |= BIT(INNER_IP_TOS);
5329
5330                 if (!usr_ip4_spec->proto)
5331                         *unused |= BIT(INNER_IP_PROTO);
5332
5333                 if (usr_ip4_spec->l4_4_bytes)
5334                         return -EOPNOTSUPP;
5335
5336                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5337                         return -EOPNOTSUPP;
5338
5339                 break;
5340         case SCTP_V6_FLOW:
5341         case TCP_V6_FLOW:
5342         case UDP_V6_FLOW:
5343                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5344                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5345                         BIT(INNER_IP_TOS);
5346
5347                 /* check whether src/dst ip address used */
5348                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5349                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5350                         *unused |= BIT(INNER_SRC_IP);
5351
5352                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5353                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5354                         *unused |= BIT(INNER_DST_IP);
5355
5356                 if (!tcp_ip6_spec->psrc)
5357                         *unused |= BIT(INNER_SRC_PORT);
5358
5359                 if (!tcp_ip6_spec->pdst)
5360                         *unused |= BIT(INNER_DST_PORT);
5361
5362                 if (tcp_ip6_spec->tclass)
5363                         return -EOPNOTSUPP;
5364
5365                 break;
5366         case IPV6_USER_FLOW:
5367                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5368                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5369                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5370                         BIT(INNER_DST_PORT);
5371
5372                 /* check whether src/dst ip address used */
5373                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5374                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5375                         *unused |= BIT(INNER_SRC_IP);
5376
5377                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5378                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5379                         *unused |= BIT(INNER_DST_IP);
5380
5381                 if (!usr_ip6_spec->l4_proto)
5382                         *unused |= BIT(INNER_IP_PROTO);
5383
5384                 if (usr_ip6_spec->tclass)
5385                         return -EOPNOTSUPP;
5386
5387                 if (usr_ip6_spec->l4_4_bytes)
5388                         return -EOPNOTSUPP;
5389
5390                 break;
5391         case ETHER_FLOW:
5392                 ether_spec = &fs->h_u.ether_spec;
5393                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5394                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5395                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5396
5397                 if (is_zero_ether_addr(ether_spec->h_source))
5398                         *unused |= BIT(INNER_SRC_MAC);
5399
5400                 if (is_zero_ether_addr(ether_spec->h_dest))
5401                         *unused |= BIT(INNER_DST_MAC);
5402
5403                 if (!ether_spec->h_proto)
5404                         *unused |= BIT(INNER_ETH_TYPE);
5405
5406                 break;
5407         default:
5408                 return -EOPNOTSUPP;
5409         }
5410
5411         if ((fs->flow_type & FLOW_EXT)) {
5412                 if (fs->h_ext.vlan_etype)
5413                         return -EOPNOTSUPP;
5414                 if (!fs->h_ext.vlan_tci)
5415                         *unused |= BIT(INNER_VLAN_TAG_FST);
5416
5417                 if (fs->m_ext.vlan_tci) {
5418                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5419                                 return -EINVAL;
5420                 }
5421         } else {
5422                 *unused |= BIT(INNER_VLAN_TAG_FST);
5423         }
5424
5425         if (fs->flow_type & FLOW_MAC_EXT) {
5426                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5427                         return -EOPNOTSUPP;
5428
5429                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5430                         *unused |= BIT(INNER_DST_MAC);
5431                 else
5432                         *unused &= ~(BIT(INNER_DST_MAC));
5433         }
5434
5435         return 0;
5436 }
5437
5438 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5439 {
5440         struct hclge_fd_rule *rule = NULL;
5441         struct hlist_node *node2;
5442
5443         spin_lock_bh(&hdev->fd_rule_lock);
5444         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5445                 if (rule->location >= location)
5446                         break;
5447         }
5448
5449         spin_unlock_bh(&hdev->fd_rule_lock);
5450
5451         return  rule && rule->location == location;
5452 }
5453
5454 /* make sure being called after lock up with fd_rule_lock */
5455 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5456                                      struct hclge_fd_rule *new_rule,
5457                                      u16 location,
5458                                      bool is_add)
5459 {
5460         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5461         struct hlist_node *node2;
5462
5463         if (is_add && !new_rule)
5464                 return -EINVAL;
5465
5466         hlist_for_each_entry_safe(rule, node2,
5467                                   &hdev->fd_rule_list, rule_node) {
5468                 if (rule->location >= location)
5469                         break;
5470                 parent = rule;
5471         }
5472
5473         if (rule && rule->location == location) {
5474                 hlist_del(&rule->rule_node);
5475                 kfree(rule);
5476                 hdev->hclge_fd_rule_num--;
5477
5478                 if (!is_add) {
5479                         if (!hdev->hclge_fd_rule_num)
5480                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5481                         clear_bit(location, hdev->fd_bmap);
5482
5483                         return 0;
5484                 }
5485         } else if (!is_add) {
5486                 dev_err(&hdev->pdev->dev,
5487                         "delete fail, rule %u is inexistent\n",
5488                         location);
5489                 return -EINVAL;
5490         }
5491
5492         INIT_HLIST_NODE(&new_rule->rule_node);
5493
5494         if (parent)
5495                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5496         else
5497                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5498
5499         set_bit(location, hdev->fd_bmap);
5500         hdev->hclge_fd_rule_num++;
5501         hdev->fd_active_type = new_rule->rule_type;
5502
5503         return 0;
5504 }
5505
5506 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5507                               struct ethtool_rx_flow_spec *fs,
5508                               struct hclge_fd_rule *rule)
5509 {
5510         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5511
5512         switch (flow_type) {
5513         case SCTP_V4_FLOW:
5514         case TCP_V4_FLOW:
5515         case UDP_V4_FLOW:
5516                 rule->tuples.src_ip[IPV4_INDEX] =
5517                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5518                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5519                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5520
5521                 rule->tuples.dst_ip[IPV4_INDEX] =
5522                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5523                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5524                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5525
5526                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5527                 rule->tuples_mask.src_port =
5528                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5529
5530                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5531                 rule->tuples_mask.dst_port =
5532                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5533
5534                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5535                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5536
5537                 rule->tuples.ether_proto = ETH_P_IP;
5538                 rule->tuples_mask.ether_proto = 0xFFFF;
5539
5540                 break;
5541         case IP_USER_FLOW:
5542                 rule->tuples.src_ip[IPV4_INDEX] =
5543                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5544                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5545                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5546
5547                 rule->tuples.dst_ip[IPV4_INDEX] =
5548                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5549                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5550                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5551
5552                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5553                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5554
5555                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5556                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5557
5558                 rule->tuples.ether_proto = ETH_P_IP;
5559                 rule->tuples_mask.ether_proto = 0xFFFF;
5560
5561                 break;
5562         case SCTP_V6_FLOW:
5563         case TCP_V6_FLOW:
5564         case UDP_V6_FLOW:
5565                 be32_to_cpu_array(rule->tuples.src_ip,
5566                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5567                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5568                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5569
5570                 be32_to_cpu_array(rule->tuples.dst_ip,
5571                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5572                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5573                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5574
5575                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5576                 rule->tuples_mask.src_port =
5577                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5578
5579                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5580                 rule->tuples_mask.dst_port =
5581                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5582
5583                 rule->tuples.ether_proto = ETH_P_IPV6;
5584                 rule->tuples_mask.ether_proto = 0xFFFF;
5585
5586                 break;
5587         case IPV6_USER_FLOW:
5588                 be32_to_cpu_array(rule->tuples.src_ip,
5589                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5590                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5591                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5592
5593                 be32_to_cpu_array(rule->tuples.dst_ip,
5594                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5595                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5596                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5597
5598                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5599                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5600
5601                 rule->tuples.ether_proto = ETH_P_IPV6;
5602                 rule->tuples_mask.ether_proto = 0xFFFF;
5603
5604                 break;
5605         case ETHER_FLOW:
5606                 ether_addr_copy(rule->tuples.src_mac,
5607                                 fs->h_u.ether_spec.h_source);
5608                 ether_addr_copy(rule->tuples_mask.src_mac,
5609                                 fs->m_u.ether_spec.h_source);
5610
5611                 ether_addr_copy(rule->tuples.dst_mac,
5612                                 fs->h_u.ether_spec.h_dest);
5613                 ether_addr_copy(rule->tuples_mask.dst_mac,
5614                                 fs->m_u.ether_spec.h_dest);
5615
5616                 rule->tuples.ether_proto =
5617                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5618                 rule->tuples_mask.ether_proto =
5619                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5620
5621                 break;
5622         default:
5623                 return -EOPNOTSUPP;
5624         }
5625
5626         switch (flow_type) {
5627         case SCTP_V4_FLOW:
5628         case SCTP_V6_FLOW:
5629                 rule->tuples.ip_proto = IPPROTO_SCTP;
5630                 rule->tuples_mask.ip_proto = 0xFF;
5631                 break;
5632         case TCP_V4_FLOW:
5633         case TCP_V6_FLOW:
5634                 rule->tuples.ip_proto = IPPROTO_TCP;
5635                 rule->tuples_mask.ip_proto = 0xFF;
5636                 break;
5637         case UDP_V4_FLOW:
5638         case UDP_V6_FLOW:
5639                 rule->tuples.ip_proto = IPPROTO_UDP;
5640                 rule->tuples_mask.ip_proto = 0xFF;
5641                 break;
5642         default:
5643                 break;
5644         }
5645
5646         if ((fs->flow_type & FLOW_EXT)) {
5647                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5648                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5649         }
5650
5651         if (fs->flow_type & FLOW_MAC_EXT) {
5652                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5653                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5654         }
5655
5656         return 0;
5657 }
5658
5659 /* make sure being called after lock up with fd_rule_lock */
5660 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5661                                 struct hclge_fd_rule *rule)
5662 {
5663         int ret;
5664
5665         if (!rule) {
5666                 dev_err(&hdev->pdev->dev,
5667                         "The flow director rule is NULL\n");
5668                 return -EINVAL;
5669         }
5670
5671         /* it will never fail here, so needn't to check return value */
5672         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5673
5674         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5675         if (ret)
5676                 goto clear_rule;
5677
5678         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5679         if (ret)
5680                 goto clear_rule;
5681
5682         return 0;
5683
5684 clear_rule:
5685         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5686         return ret;
5687 }
5688
5689 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5690                               struct ethtool_rxnfc *cmd)
5691 {
5692         struct hclge_vport *vport = hclge_get_vport(handle);
5693         struct hclge_dev *hdev = vport->back;
5694         u16 dst_vport_id = 0, q_index = 0;
5695         struct ethtool_rx_flow_spec *fs;
5696         struct hclge_fd_rule *rule;
5697         u32 unused = 0;
5698         u8 action;
5699         int ret;
5700
5701         if (!hnae3_dev_fd_supported(hdev))
5702                 return -EOPNOTSUPP;
5703
5704         if (!hdev->fd_en) {
5705                 dev_warn(&hdev->pdev->dev,
5706                          "Please enable flow director first\n");
5707                 return -EOPNOTSUPP;
5708         }
5709
5710         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5711
5712         ret = hclge_fd_check_spec(hdev, fs, &unused);
5713         if (ret) {
5714                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5715                 return ret;
5716         }
5717
5718         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5719                 action = HCLGE_FD_ACTION_DROP_PACKET;
5720         } else {
5721                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5722                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5723                 u16 tqps;
5724
5725                 if (vf > hdev->num_req_vfs) {
5726                         dev_err(&hdev->pdev->dev,
5727                                 "Error: vf id (%u) > max vf num (%u)\n",
5728                                 vf, hdev->num_req_vfs);
5729                         return -EINVAL;
5730                 }
5731
5732                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5733                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5734
5735                 if (ring >= tqps) {
5736                         dev_err(&hdev->pdev->dev,
5737                                 "Error: queue id (%u) > max tqp num (%u)\n",
5738                                 ring, tqps - 1);
5739                         return -EINVAL;
5740                 }
5741
5742                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5743                 q_index = ring;
5744         }
5745
5746         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5747         if (!rule)
5748                 return -ENOMEM;
5749
5750         ret = hclge_fd_get_tuple(hdev, fs, rule);
5751         if (ret) {
5752                 kfree(rule);
5753                 return ret;
5754         }
5755
5756         rule->flow_type = fs->flow_type;
5757
5758         rule->location = fs->location;
5759         rule->unused_tuple = unused;
5760         rule->vf_id = dst_vport_id;
5761         rule->queue_id = q_index;
5762         rule->action = action;
5763         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5764
5765         /* to avoid rule conflict, when user configure rule by ethtool,
5766          * we need to clear all arfs rules
5767          */
5768         hclge_clear_arfs_rules(handle);
5769
5770         spin_lock_bh(&hdev->fd_rule_lock);
5771         ret = hclge_fd_config_rule(hdev, rule);
5772
5773         spin_unlock_bh(&hdev->fd_rule_lock);
5774
5775         return ret;
5776 }
5777
5778 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5779                               struct ethtool_rxnfc *cmd)
5780 {
5781         struct hclge_vport *vport = hclge_get_vport(handle);
5782         struct hclge_dev *hdev = vport->back;
5783         struct ethtool_rx_flow_spec *fs;
5784         int ret;
5785
5786         if (!hnae3_dev_fd_supported(hdev))
5787                 return -EOPNOTSUPP;
5788
5789         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5790
5791         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5792                 return -EINVAL;
5793
5794         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5795                 dev_err(&hdev->pdev->dev,
5796                         "Delete fail, rule %u is inexistent\n", fs->location);
5797                 return -ENOENT;
5798         }
5799
5800         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5801                                    NULL, false);
5802         if (ret)
5803                 return ret;
5804
5805         spin_lock_bh(&hdev->fd_rule_lock);
5806         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5807
5808         spin_unlock_bh(&hdev->fd_rule_lock);
5809
5810         return ret;
5811 }
5812
5813 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5814                                      bool clear_list)
5815 {
5816         struct hclge_vport *vport = hclge_get_vport(handle);
5817         struct hclge_dev *hdev = vport->back;
5818         struct hclge_fd_rule *rule;
5819         struct hlist_node *node;
5820         u16 location;
5821
5822         if (!hnae3_dev_fd_supported(hdev))
5823                 return;
5824
5825         spin_lock_bh(&hdev->fd_rule_lock);
5826         for_each_set_bit(location, hdev->fd_bmap,
5827                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5828                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5829                                      NULL, false);
5830
5831         if (clear_list) {
5832                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5833                                           rule_node) {
5834                         hlist_del(&rule->rule_node);
5835                         kfree(rule);
5836                 }
5837                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5838                 hdev->hclge_fd_rule_num = 0;
5839                 bitmap_zero(hdev->fd_bmap,
5840                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5841         }
5842
5843         spin_unlock_bh(&hdev->fd_rule_lock);
5844 }
5845
5846 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5847 {
5848         struct hclge_vport *vport = hclge_get_vport(handle);
5849         struct hclge_dev *hdev = vport->back;
5850         struct hclge_fd_rule *rule;
5851         struct hlist_node *node;
5852         int ret;
5853
5854         /* Return ok here, because reset error handling will check this
5855          * return value. If error is returned here, the reset process will
5856          * fail.
5857          */
5858         if (!hnae3_dev_fd_supported(hdev))
5859                 return 0;
5860
5861         /* if fd is disabled, should not restore it when reset */
5862         if (!hdev->fd_en)
5863                 return 0;
5864
5865         spin_lock_bh(&hdev->fd_rule_lock);
5866         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5867                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5868                 if (!ret)
5869                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5870
5871                 if (ret) {
5872                         dev_warn(&hdev->pdev->dev,
5873                                  "Restore rule %u failed, remove it\n",
5874                                  rule->location);
5875                         clear_bit(rule->location, hdev->fd_bmap);
5876                         hlist_del(&rule->rule_node);
5877                         kfree(rule);
5878                         hdev->hclge_fd_rule_num--;
5879                 }
5880         }
5881
5882         if (hdev->hclge_fd_rule_num)
5883                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5884
5885         spin_unlock_bh(&hdev->fd_rule_lock);
5886
5887         return 0;
5888 }
5889
5890 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5891                                  struct ethtool_rxnfc *cmd)
5892 {
5893         struct hclge_vport *vport = hclge_get_vport(handle);
5894         struct hclge_dev *hdev = vport->back;
5895
5896         if (!hnae3_dev_fd_supported(hdev))
5897                 return -EOPNOTSUPP;
5898
5899         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5900         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5901
5902         return 0;
5903 }
5904
5905 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5906                                   struct ethtool_rxnfc *cmd)
5907 {
5908         struct hclge_vport *vport = hclge_get_vport(handle);
5909         struct hclge_fd_rule *rule = NULL;
5910         struct hclge_dev *hdev = vport->back;
5911         struct ethtool_rx_flow_spec *fs;
5912         struct hlist_node *node2;
5913
5914         if (!hnae3_dev_fd_supported(hdev))
5915                 return -EOPNOTSUPP;
5916
5917         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5918
5919         spin_lock_bh(&hdev->fd_rule_lock);
5920
5921         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5922                 if (rule->location >= fs->location)
5923                         break;
5924         }
5925
5926         if (!rule || fs->location != rule->location) {
5927                 spin_unlock_bh(&hdev->fd_rule_lock);
5928
5929                 return -ENOENT;
5930         }
5931
5932         fs->flow_type = rule->flow_type;
5933         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5934         case SCTP_V4_FLOW:
5935         case TCP_V4_FLOW:
5936         case UDP_V4_FLOW:
5937                 fs->h_u.tcp_ip4_spec.ip4src =
5938                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5939                 fs->m_u.tcp_ip4_spec.ip4src =
5940                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5941                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5942
5943                 fs->h_u.tcp_ip4_spec.ip4dst =
5944                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5945                 fs->m_u.tcp_ip4_spec.ip4dst =
5946                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5947                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5948
5949                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5950                 fs->m_u.tcp_ip4_spec.psrc =
5951                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5952                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5953
5954                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5955                 fs->m_u.tcp_ip4_spec.pdst =
5956                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5957                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5958
5959                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5960                 fs->m_u.tcp_ip4_spec.tos =
5961                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5962                                 0 : rule->tuples_mask.ip_tos;
5963
5964                 break;
5965         case IP_USER_FLOW:
5966                 fs->h_u.usr_ip4_spec.ip4src =
5967                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5968                 fs->m_u.tcp_ip4_spec.ip4src =
5969                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5970                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5971
5972                 fs->h_u.usr_ip4_spec.ip4dst =
5973                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5974                 fs->m_u.usr_ip4_spec.ip4dst =
5975                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5976                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5977
5978                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5979                 fs->m_u.usr_ip4_spec.tos =
5980                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5981                                 0 : rule->tuples_mask.ip_tos;
5982
5983                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5984                 fs->m_u.usr_ip4_spec.proto =
5985                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5986                                 0 : rule->tuples_mask.ip_proto;
5987
5988                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5989
5990                 break;
5991         case SCTP_V6_FLOW:
5992         case TCP_V6_FLOW:
5993         case UDP_V6_FLOW:
5994                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5995                                   rule->tuples.src_ip, IPV6_SIZE);
5996                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5997                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5998                                sizeof(int) * IPV6_SIZE);
5999                 else
6000                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
6001                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6002
6003                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6004                                   rule->tuples.dst_ip, IPV6_SIZE);
6005                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6006                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6007                                sizeof(int) * IPV6_SIZE);
6008                 else
6009                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6010                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6011
6012                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6013                 fs->m_u.tcp_ip6_spec.psrc =
6014                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6015                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
6016
6017                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6018                 fs->m_u.tcp_ip6_spec.pdst =
6019                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6020                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6021
6022                 break;
6023         case IPV6_USER_FLOW:
6024                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6025                                   rule->tuples.src_ip, IPV6_SIZE);
6026                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6027                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6028                                sizeof(int) * IPV6_SIZE);
6029                 else
6030                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6031                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6032
6033                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6034                                   rule->tuples.dst_ip, IPV6_SIZE);
6035                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6036                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6037                                sizeof(int) * IPV6_SIZE);
6038                 else
6039                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6040                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6041
6042                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6043                 fs->m_u.usr_ip6_spec.l4_proto =
6044                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6045                                 0 : rule->tuples_mask.ip_proto;
6046
6047                 break;
6048         case ETHER_FLOW:
6049                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6050                                 rule->tuples.src_mac);
6051                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6052                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6053                 else
6054                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6055                                         rule->tuples_mask.src_mac);
6056
6057                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6058                                 rule->tuples.dst_mac);
6059                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6060                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6061                 else
6062                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6063                                         rule->tuples_mask.dst_mac);
6064
6065                 fs->h_u.ether_spec.h_proto =
6066                                 cpu_to_be16(rule->tuples.ether_proto);
6067                 fs->m_u.ether_spec.h_proto =
6068                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6069                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6070
6071                 break;
6072         default:
6073                 spin_unlock_bh(&hdev->fd_rule_lock);
6074                 return -EOPNOTSUPP;
6075         }
6076
6077         if (fs->flow_type & FLOW_EXT) {
6078                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6079                 fs->m_ext.vlan_tci =
6080                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6081                                 cpu_to_be16(VLAN_VID_MASK) :
6082                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6083         }
6084
6085         if (fs->flow_type & FLOW_MAC_EXT) {
6086                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6087                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6088                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6089                 else
6090                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6091                                         rule->tuples_mask.dst_mac);
6092         }
6093
6094         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6095                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6096         } else {
6097                 u64 vf_id;
6098
6099                 fs->ring_cookie = rule->queue_id;
6100                 vf_id = rule->vf_id;
6101                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6102                 fs->ring_cookie |= vf_id;
6103         }
6104
6105         spin_unlock_bh(&hdev->fd_rule_lock);
6106
6107         return 0;
6108 }
6109
6110 static int hclge_get_all_rules(struct hnae3_handle *handle,
6111                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6112 {
6113         struct hclge_vport *vport = hclge_get_vport(handle);
6114         struct hclge_dev *hdev = vport->back;
6115         struct hclge_fd_rule *rule;
6116         struct hlist_node *node2;
6117         int cnt = 0;
6118
6119         if (!hnae3_dev_fd_supported(hdev))
6120                 return -EOPNOTSUPP;
6121
6122         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6123
6124         spin_lock_bh(&hdev->fd_rule_lock);
6125         hlist_for_each_entry_safe(rule, node2,
6126                                   &hdev->fd_rule_list, rule_node) {
6127                 if (cnt == cmd->rule_cnt) {
6128                         spin_unlock_bh(&hdev->fd_rule_lock);
6129                         return -EMSGSIZE;
6130                 }
6131
6132                 rule_locs[cnt] = rule->location;
6133                 cnt++;
6134         }
6135
6136         spin_unlock_bh(&hdev->fd_rule_lock);
6137
6138         cmd->rule_cnt = cnt;
6139
6140         return 0;
6141 }
6142
6143 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6144                                      struct hclge_fd_rule_tuples *tuples)
6145 {
6146         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6147         tuples->ip_proto = fkeys->basic.ip_proto;
6148         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6149
6150         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6151                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6152                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6153         } else {
6154                 memcpy(tuples->src_ip,
6155                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6156                        sizeof(tuples->src_ip));
6157                 memcpy(tuples->dst_ip,
6158                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6159                        sizeof(tuples->dst_ip));
6160         }
6161 }
6162
6163 /* traverse all rules, check whether an existed rule has the same tuples */
6164 static struct hclge_fd_rule *
6165 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6166                           const struct hclge_fd_rule_tuples *tuples)
6167 {
6168         struct hclge_fd_rule *rule = NULL;
6169         struct hlist_node *node;
6170
6171         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6172                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6173                         return rule;
6174         }
6175
6176         return NULL;
6177 }
6178
6179 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6180                                      struct hclge_fd_rule *rule)
6181 {
6182         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6183                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6184                              BIT(INNER_SRC_PORT);
6185         rule->action = 0;
6186         rule->vf_id = 0;
6187         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6188         if (tuples->ether_proto == ETH_P_IP) {
6189                 if (tuples->ip_proto == IPPROTO_TCP)
6190                         rule->flow_type = TCP_V4_FLOW;
6191                 else
6192                         rule->flow_type = UDP_V4_FLOW;
6193         } else {
6194                 if (tuples->ip_proto == IPPROTO_TCP)
6195                         rule->flow_type = TCP_V6_FLOW;
6196                 else
6197                         rule->flow_type = UDP_V6_FLOW;
6198         }
6199         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6200         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6201 }
6202
6203 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6204                                       u16 flow_id, struct flow_keys *fkeys)
6205 {
6206         struct hclge_vport *vport = hclge_get_vport(handle);
6207         struct hclge_fd_rule_tuples new_tuples;
6208         struct hclge_dev *hdev = vport->back;
6209         struct hclge_fd_rule *rule;
6210         u16 tmp_queue_id;
6211         u16 bit_id;
6212         int ret;
6213
6214         if (!hnae3_dev_fd_supported(hdev))
6215                 return -EOPNOTSUPP;
6216
6217         memset(&new_tuples, 0, sizeof(new_tuples));
6218         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6219
6220         spin_lock_bh(&hdev->fd_rule_lock);
6221
6222         /* when there is already fd rule existed add by user,
6223          * arfs should not work
6224          */
6225         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6226                 spin_unlock_bh(&hdev->fd_rule_lock);
6227
6228                 return -EOPNOTSUPP;
6229         }
6230
6231         /* check is there flow director filter existed for this flow,
6232          * if not, create a new filter for it;
6233          * if filter exist with different queue id, modify the filter;
6234          * if filter exist with same queue id, do nothing
6235          */
6236         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6237         if (!rule) {
6238                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6239                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6240                         spin_unlock_bh(&hdev->fd_rule_lock);
6241
6242                         return -ENOSPC;
6243                 }
6244
6245                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6246                 if (!rule) {
6247                         spin_unlock_bh(&hdev->fd_rule_lock);
6248
6249                         return -ENOMEM;
6250                 }
6251
6252                 set_bit(bit_id, hdev->fd_bmap);
6253                 rule->location = bit_id;
6254                 rule->flow_id = flow_id;
6255                 rule->queue_id = queue_id;
6256                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6257                 ret = hclge_fd_config_rule(hdev, rule);
6258
6259                 spin_unlock_bh(&hdev->fd_rule_lock);
6260
6261                 if (ret)
6262                         return ret;
6263
6264                 return rule->location;
6265         }
6266
6267         spin_unlock_bh(&hdev->fd_rule_lock);
6268
6269         if (rule->queue_id == queue_id)
6270                 return rule->location;
6271
6272         tmp_queue_id = rule->queue_id;
6273         rule->queue_id = queue_id;
6274         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6275         if (ret) {
6276                 rule->queue_id = tmp_queue_id;
6277                 return ret;
6278         }
6279
6280         return rule->location;
6281 }
6282
6283 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6284 {
6285 #ifdef CONFIG_RFS_ACCEL
6286         struct hnae3_handle *handle = &hdev->vport[0].nic;
6287         struct hclge_fd_rule *rule;
6288         struct hlist_node *node;
6289         HLIST_HEAD(del_list);
6290
6291         spin_lock_bh(&hdev->fd_rule_lock);
6292         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6293                 spin_unlock_bh(&hdev->fd_rule_lock);
6294                 return;
6295         }
6296         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6297                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6298                                         rule->flow_id, rule->location)) {
6299                         hlist_del_init(&rule->rule_node);
6300                         hlist_add_head(&rule->rule_node, &del_list);
6301                         hdev->hclge_fd_rule_num--;
6302                         clear_bit(rule->location, hdev->fd_bmap);
6303                 }
6304         }
6305         spin_unlock_bh(&hdev->fd_rule_lock);
6306
6307         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6308                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6309                                      rule->location, NULL, false);
6310                 kfree(rule);
6311         }
6312 #endif
6313 }
6314
6315 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6316 {
6317 #ifdef CONFIG_RFS_ACCEL
6318         struct hclge_vport *vport = hclge_get_vport(handle);
6319         struct hclge_dev *hdev = vport->back;
6320
6321         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6322                 hclge_del_all_fd_entries(handle, true);
6323 #endif
6324 }
6325
6326 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6327 {
6328         struct hclge_vport *vport = hclge_get_vport(handle);
6329         struct hclge_dev *hdev = vport->back;
6330
6331         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6332                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6333 }
6334
6335 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6336 {
6337         struct hclge_vport *vport = hclge_get_vport(handle);
6338         struct hclge_dev *hdev = vport->back;
6339
6340         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6341 }
6342
6343 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6344 {
6345         struct hclge_vport *vport = hclge_get_vport(handle);
6346         struct hclge_dev *hdev = vport->back;
6347
6348         return hdev->rst_stats.hw_reset_done_cnt;
6349 }
6350
6351 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6352 {
6353         struct hclge_vport *vport = hclge_get_vport(handle);
6354         struct hclge_dev *hdev = vport->back;
6355         bool clear;
6356
6357         hdev->fd_en = enable;
6358         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6359         if (!enable)
6360                 hclge_del_all_fd_entries(handle, clear);
6361         else
6362                 hclge_restore_fd_entries(handle);
6363 }
6364
6365 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6366 {
6367         struct hclge_desc desc;
6368         struct hclge_config_mac_mode_cmd *req =
6369                 (struct hclge_config_mac_mode_cmd *)desc.data;
6370         u32 loop_en = 0;
6371         int ret;
6372
6373         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6374
6375         if (enable) {
6376                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6377                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6378                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6379                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6380                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6381                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6382                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6383                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6384                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6385                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6386         }
6387
6388         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6389
6390         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6391         if (ret)
6392                 dev_err(&hdev->pdev->dev,
6393                         "mac enable fail, ret =%d.\n", ret);
6394 }
6395
6396 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6397                                      u8 switch_param, u8 param_mask)
6398 {
6399         struct hclge_mac_vlan_switch_cmd *req;
6400         struct hclge_desc desc;
6401         u32 func_id;
6402         int ret;
6403
6404         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6405         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6406
6407         /* read current config parameter */
6408         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6409                                    true);
6410         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6411         req->func_id = cpu_to_le32(func_id);
6412
6413         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6414         if (ret) {
6415                 dev_err(&hdev->pdev->dev,
6416                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6417                 return ret;
6418         }
6419
6420         /* modify and write new config parameter */
6421         hclge_cmd_reuse_desc(&desc, false);
6422         req->switch_param = (req->switch_param & param_mask) | switch_param;
6423         req->param_mask = param_mask;
6424
6425         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6426         if (ret)
6427                 dev_err(&hdev->pdev->dev,
6428                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6429         return ret;
6430 }
6431
6432 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6433                                        int link_ret)
6434 {
6435 #define HCLGE_PHY_LINK_STATUS_NUM  200
6436
6437         struct phy_device *phydev = hdev->hw.mac.phydev;
6438         int i = 0;
6439         int ret;
6440
6441         do {
6442                 ret = phy_read_status(phydev);
6443                 if (ret) {
6444                         dev_err(&hdev->pdev->dev,
6445                                 "phy update link status fail, ret = %d\n", ret);
6446                         return;
6447                 }
6448
6449                 if (phydev->link == link_ret)
6450                         break;
6451
6452                 msleep(HCLGE_LINK_STATUS_MS);
6453         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6454 }
6455
6456 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6457 {
6458 #define HCLGE_MAC_LINK_STATUS_NUM  100
6459
6460         int i = 0;
6461         int ret;
6462
6463         do {
6464                 ret = hclge_get_mac_link_status(hdev);
6465                 if (ret < 0)
6466                         return ret;
6467                 else if (ret == link_ret)
6468                         return 0;
6469
6470                 msleep(HCLGE_LINK_STATUS_MS);
6471         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6472         return -EBUSY;
6473 }
6474
6475 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6476                                           bool is_phy)
6477 {
6478 #define HCLGE_LINK_STATUS_DOWN 0
6479 #define HCLGE_LINK_STATUS_UP   1
6480
6481         int link_ret;
6482
6483         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6484
6485         if (is_phy)
6486                 hclge_phy_link_status_wait(hdev, link_ret);
6487
6488         return hclge_mac_link_status_wait(hdev, link_ret);
6489 }
6490
6491 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6492 {
6493         struct hclge_config_mac_mode_cmd *req;
6494         struct hclge_desc desc;
6495         u32 loop_en;
6496         int ret;
6497
6498         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6499         /* 1 Read out the MAC mode config at first */
6500         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6501         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6502         if (ret) {
6503                 dev_err(&hdev->pdev->dev,
6504                         "mac loopback get fail, ret =%d.\n", ret);
6505                 return ret;
6506         }
6507
6508         /* 2 Then setup the loopback flag */
6509         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6510         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6511         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6512         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6513
6514         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6515
6516         /* 3 Config mac work mode with loopback flag
6517          * and its original configure parameters
6518          */
6519         hclge_cmd_reuse_desc(&desc, false);
6520         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6521         if (ret)
6522                 dev_err(&hdev->pdev->dev,
6523                         "mac loopback set fail, ret =%d.\n", ret);
6524         return ret;
6525 }
6526
6527 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6528                                      enum hnae3_loop loop_mode)
6529 {
6530 #define HCLGE_SERDES_RETRY_MS   10
6531 #define HCLGE_SERDES_RETRY_NUM  100
6532
6533         struct hclge_serdes_lb_cmd *req;
6534         struct hclge_desc desc;
6535         int ret, i = 0;
6536         u8 loop_mode_b;
6537
6538         req = (struct hclge_serdes_lb_cmd *)desc.data;
6539         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6540
6541         switch (loop_mode) {
6542         case HNAE3_LOOP_SERIAL_SERDES:
6543                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6544                 break;
6545         case HNAE3_LOOP_PARALLEL_SERDES:
6546                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6547                 break;
6548         default:
6549                 dev_err(&hdev->pdev->dev,
6550                         "unsupported serdes loopback mode %d\n", loop_mode);
6551                 return -ENOTSUPP;
6552         }
6553
6554         if (en) {
6555                 req->enable = loop_mode_b;
6556                 req->mask = loop_mode_b;
6557         } else {
6558                 req->mask = loop_mode_b;
6559         }
6560
6561         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6562         if (ret) {
6563                 dev_err(&hdev->pdev->dev,
6564                         "serdes loopback set fail, ret = %d\n", ret);
6565                 return ret;
6566         }
6567
6568         do {
6569                 msleep(HCLGE_SERDES_RETRY_MS);
6570                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6571                                            true);
6572                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6573                 if (ret) {
6574                         dev_err(&hdev->pdev->dev,
6575                                 "serdes loopback get, ret = %d\n", ret);
6576                         return ret;
6577                 }
6578         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6579                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6580
6581         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6582                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6583                 return -EBUSY;
6584         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6585                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6586                 return -EIO;
6587         }
6588         return ret;
6589 }
6590
6591 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6592                                      enum hnae3_loop loop_mode)
6593 {
6594         int ret;
6595
6596         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6597         if (ret)
6598                 return ret;
6599
6600         hclge_cfg_mac_mode(hdev, en);
6601
6602         ret = hclge_mac_phy_link_status_wait(hdev, en, FALSE);
6603         if (ret)
6604                 dev_err(&hdev->pdev->dev,
6605                         "serdes loopback config mac mode timeout\n");
6606
6607         return ret;
6608 }
6609
6610 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6611                                      struct phy_device *phydev)
6612 {
6613         int ret;
6614
6615         if (!phydev->suspended) {
6616                 ret = phy_suspend(phydev);
6617                 if (ret)
6618                         return ret;
6619         }
6620
6621         ret = phy_resume(phydev);
6622         if (ret)
6623                 return ret;
6624
6625         return phy_loopback(phydev, true);
6626 }
6627
6628 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6629                                       struct phy_device *phydev)
6630 {
6631         int ret;
6632
6633         ret = phy_loopback(phydev, false);
6634         if (ret)
6635                 return ret;
6636
6637         return phy_suspend(phydev);
6638 }
6639
6640 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6641 {
6642         struct phy_device *phydev = hdev->hw.mac.phydev;
6643         int ret;
6644
6645         if (!phydev)
6646                 return -ENOTSUPP;
6647
6648         if (en)
6649                 ret = hclge_enable_phy_loopback(hdev, phydev);
6650         else
6651                 ret = hclge_disable_phy_loopback(hdev, phydev);
6652         if (ret) {
6653                 dev_err(&hdev->pdev->dev,
6654                         "set phy loopback fail, ret = %d\n", ret);
6655                 return ret;
6656         }
6657
6658         hclge_cfg_mac_mode(hdev, en);
6659
6660         ret = hclge_mac_phy_link_status_wait(hdev, en, TRUE);
6661         if (ret)
6662                 dev_err(&hdev->pdev->dev,
6663                         "phy loopback config mac mode timeout\n");
6664
6665         return ret;
6666 }
6667
6668 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6669                             int stream_id, bool enable)
6670 {
6671         struct hclge_desc desc;
6672         struct hclge_cfg_com_tqp_queue_cmd *req =
6673                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6674         int ret;
6675
6676         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6677         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6678         req->stream_id = cpu_to_le16(stream_id);
6679         if (enable)
6680                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6681
6682         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6683         if (ret)
6684                 dev_err(&hdev->pdev->dev,
6685                         "Tqp enable fail, status =%d.\n", ret);
6686         return ret;
6687 }
6688
6689 static int hclge_set_loopback(struct hnae3_handle *handle,
6690                               enum hnae3_loop loop_mode, bool en)
6691 {
6692         struct hclge_vport *vport = hclge_get_vport(handle);
6693         struct hnae3_knic_private_info *kinfo;
6694         struct hclge_dev *hdev = vport->back;
6695         int i, ret;
6696
6697         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6698          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6699          * the same, the packets are looped back in the SSU. If SSU loopback
6700          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6701          */
6702         if (hdev->pdev->revision >= 0x21) {
6703                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6704
6705                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6706                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6707                 if (ret)
6708                         return ret;
6709         }
6710
6711         switch (loop_mode) {
6712         case HNAE3_LOOP_APP:
6713                 ret = hclge_set_app_loopback(hdev, en);
6714                 break;
6715         case HNAE3_LOOP_SERIAL_SERDES:
6716         case HNAE3_LOOP_PARALLEL_SERDES:
6717                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6718                 break;
6719         case HNAE3_LOOP_PHY:
6720                 ret = hclge_set_phy_loopback(hdev, en);
6721                 break;
6722         default:
6723                 ret = -ENOTSUPP;
6724                 dev_err(&hdev->pdev->dev,
6725                         "loop_mode %d is not supported\n", loop_mode);
6726                 break;
6727         }
6728
6729         if (ret)
6730                 return ret;
6731
6732         kinfo = &vport->nic.kinfo;
6733         for (i = 0; i < kinfo->num_tqps; i++) {
6734                 ret = hclge_tqp_enable(hdev, i, 0, en);
6735                 if (ret)
6736                         return ret;
6737         }
6738
6739         return 0;
6740 }
6741
6742 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6743 {
6744         int ret;
6745
6746         ret = hclge_set_app_loopback(hdev, false);
6747         if (ret)
6748                 return ret;
6749
6750         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6751         if (ret)
6752                 return ret;
6753
6754         return hclge_cfg_serdes_loopback(hdev, false,
6755                                          HNAE3_LOOP_PARALLEL_SERDES);
6756 }
6757
6758 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6759 {
6760         struct hclge_vport *vport = hclge_get_vport(handle);
6761         struct hnae3_knic_private_info *kinfo;
6762         struct hnae3_queue *queue;
6763         struct hclge_tqp *tqp;
6764         int i;
6765
6766         kinfo = &vport->nic.kinfo;
6767         for (i = 0; i < kinfo->num_tqps; i++) {
6768                 queue = handle->kinfo.tqp[i];
6769                 tqp = container_of(queue, struct hclge_tqp, q);
6770                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6771         }
6772 }
6773
6774 static void hclge_flush_link_update(struct hclge_dev *hdev)
6775 {
6776 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6777
6778         unsigned long last = hdev->serv_processed_cnt;
6779         int i = 0;
6780
6781         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6782                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6783                last == hdev->serv_processed_cnt)
6784                 usleep_range(1, 1);
6785 }
6786
6787 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6788 {
6789         struct hclge_vport *vport = hclge_get_vport(handle);
6790         struct hclge_dev *hdev = vport->back;
6791
6792         if (enable) {
6793                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6794         } else {
6795                 /* Set the DOWN flag here to disable link updating */
6796                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6797
6798                 /* flush memory to make sure DOWN is seen by service task */
6799                 smp_mb__before_atomic();
6800                 hclge_flush_link_update(hdev);
6801         }
6802 }
6803
6804 static int hclge_ae_start(struct hnae3_handle *handle)
6805 {
6806         struct hclge_vport *vport = hclge_get_vport(handle);
6807         struct hclge_dev *hdev = vport->back;
6808
6809         /* mac enable */
6810         hclge_cfg_mac_mode(hdev, true);
6811         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6812         hdev->hw.mac.link = 0;
6813
6814         /* reset tqp stats */
6815         hclge_reset_tqp_stats(handle);
6816
6817         hclge_mac_start_phy(hdev);
6818
6819         return 0;
6820 }
6821
6822 static void hclge_ae_stop(struct hnae3_handle *handle)
6823 {
6824         struct hclge_vport *vport = hclge_get_vport(handle);
6825         struct hclge_dev *hdev = vport->back;
6826         int i;
6827
6828         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6829
6830         hclge_clear_arfs_rules(handle);
6831
6832         /* If it is not PF reset, the firmware will disable the MAC,
6833          * so it only need to stop phy here.
6834          */
6835         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6836             hdev->reset_type != HNAE3_FUNC_RESET) {
6837                 hclge_mac_stop_phy(hdev);
6838                 hclge_update_link_status(hdev);
6839                 return;
6840         }
6841
6842         for (i = 0; i < handle->kinfo.num_tqps; i++)
6843                 hclge_reset_tqp(handle, i);
6844
6845         hclge_config_mac_tnl_int(hdev, false);
6846
6847         /* Mac disable */
6848         hclge_cfg_mac_mode(hdev, false);
6849
6850         hclge_mac_stop_phy(hdev);
6851
6852         /* reset tqp stats */
6853         hclge_reset_tqp_stats(handle);
6854         hclge_update_link_status(hdev);
6855 }
6856
6857 int hclge_vport_start(struct hclge_vport *vport)
6858 {
6859         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6860         vport->last_active_jiffies = jiffies;
6861         return 0;
6862 }
6863
6864 void hclge_vport_stop(struct hclge_vport *vport)
6865 {
6866         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6867 }
6868
6869 static int hclge_client_start(struct hnae3_handle *handle)
6870 {
6871         struct hclge_vport *vport = hclge_get_vport(handle);
6872
6873         return hclge_vport_start(vport);
6874 }
6875
6876 static void hclge_client_stop(struct hnae3_handle *handle)
6877 {
6878         struct hclge_vport *vport = hclge_get_vport(handle);
6879
6880         hclge_vport_stop(vport);
6881 }
6882
6883 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6884                                          u16 cmdq_resp, u8  resp_code,
6885                                          enum hclge_mac_vlan_tbl_opcode op)
6886 {
6887         struct hclge_dev *hdev = vport->back;
6888
6889         if (cmdq_resp) {
6890                 dev_err(&hdev->pdev->dev,
6891                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6892                         cmdq_resp);
6893                 return -EIO;
6894         }
6895
6896         if (op == HCLGE_MAC_VLAN_ADD) {
6897                 if ((!resp_code) || (resp_code == 1)) {
6898                         return 0;
6899                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6900                         dev_err(&hdev->pdev->dev,
6901                                 "add mac addr failed for uc_overflow.\n");
6902                         return -ENOSPC;
6903                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6904                         dev_err(&hdev->pdev->dev,
6905                                 "add mac addr failed for mc_overflow.\n");
6906                         return -ENOSPC;
6907                 }
6908
6909                 dev_err(&hdev->pdev->dev,
6910                         "add mac addr failed for undefined, code=%u.\n",
6911                         resp_code);
6912                 return -EIO;
6913         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6914                 if (!resp_code) {
6915                         return 0;
6916                 } else if (resp_code == 1) {
6917                         dev_dbg(&hdev->pdev->dev,
6918                                 "remove mac addr failed for miss.\n");
6919                         return -ENOENT;
6920                 }
6921
6922                 dev_err(&hdev->pdev->dev,
6923                         "remove mac addr failed for undefined, code=%u.\n",
6924                         resp_code);
6925                 return -EIO;
6926         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6927                 if (!resp_code) {
6928                         return 0;
6929                 } else if (resp_code == 1) {
6930                         dev_dbg(&hdev->pdev->dev,
6931                                 "lookup mac addr failed for miss.\n");
6932                         return -ENOENT;
6933                 }
6934
6935                 dev_err(&hdev->pdev->dev,
6936                         "lookup mac addr failed for undefined, code=%u.\n",
6937                         resp_code);
6938                 return -EIO;
6939         }
6940
6941         dev_err(&hdev->pdev->dev,
6942                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6943
6944         return -EINVAL;
6945 }
6946
6947 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6948 {
6949 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6950
6951         unsigned int word_num;
6952         unsigned int bit_num;
6953
6954         if (vfid > 255 || vfid < 0)
6955                 return -EIO;
6956
6957         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6958                 word_num = vfid / 32;
6959                 bit_num  = vfid % 32;
6960                 if (clr)
6961                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6962                 else
6963                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6964         } else {
6965                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6966                 bit_num  = vfid % 32;
6967                 if (clr)
6968                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6969                 else
6970                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6971         }
6972
6973         return 0;
6974 }
6975
6976 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6977 {
6978 #define HCLGE_DESC_NUMBER 3
6979 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6980         int i, j;
6981
6982         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6983                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6984                         if (desc[i].data[j])
6985                                 return false;
6986
6987         return true;
6988 }
6989
6990 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6991                                    const u8 *addr, bool is_mc)
6992 {
6993         const unsigned char *mac_addr = addr;
6994         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6995                        (mac_addr[0]) | (mac_addr[1] << 8);
6996         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6997
6998         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6999         if (is_mc) {
7000                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7001                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7002         }
7003
7004         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7005         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7006 }
7007
7008 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7009                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7010 {
7011         struct hclge_dev *hdev = vport->back;
7012         struct hclge_desc desc;
7013         u8 resp_code;
7014         u16 retval;
7015         int ret;
7016
7017         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7018
7019         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7020
7021         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7022         if (ret) {
7023                 dev_err(&hdev->pdev->dev,
7024                         "del mac addr failed for cmd_send, ret =%d.\n",
7025                         ret);
7026                 return ret;
7027         }
7028         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7029         retval = le16_to_cpu(desc.retval);
7030
7031         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7032                                              HCLGE_MAC_VLAN_REMOVE);
7033 }
7034
7035 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7036                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7037                                      struct hclge_desc *desc,
7038                                      bool is_mc)
7039 {
7040         struct hclge_dev *hdev = vport->back;
7041         u8 resp_code;
7042         u16 retval;
7043         int ret;
7044
7045         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7046         if (is_mc) {
7047                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7048                 memcpy(desc[0].data,
7049                        req,
7050                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7051                 hclge_cmd_setup_basic_desc(&desc[1],
7052                                            HCLGE_OPC_MAC_VLAN_ADD,
7053                                            true);
7054                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7055                 hclge_cmd_setup_basic_desc(&desc[2],
7056                                            HCLGE_OPC_MAC_VLAN_ADD,
7057                                            true);
7058                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7059         } else {
7060                 memcpy(desc[0].data,
7061                        req,
7062                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7063                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7064         }
7065         if (ret) {
7066                 dev_err(&hdev->pdev->dev,
7067                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7068                         ret);
7069                 return ret;
7070         }
7071         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7072         retval = le16_to_cpu(desc[0].retval);
7073
7074         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7075                                              HCLGE_MAC_VLAN_LKUP);
7076 }
7077
7078 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7079                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7080                                   struct hclge_desc *mc_desc)
7081 {
7082         struct hclge_dev *hdev = vport->back;
7083         int cfg_status;
7084         u8 resp_code;
7085         u16 retval;
7086         int ret;
7087
7088         if (!mc_desc) {
7089                 struct hclge_desc desc;
7090
7091                 hclge_cmd_setup_basic_desc(&desc,
7092                                            HCLGE_OPC_MAC_VLAN_ADD,
7093                                            false);
7094                 memcpy(desc.data, req,
7095                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7096                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7097                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7098                 retval = le16_to_cpu(desc.retval);
7099
7100                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7101                                                            resp_code,
7102                                                            HCLGE_MAC_VLAN_ADD);
7103         } else {
7104                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7105                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7106                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7107                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7108                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7109                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7110                 memcpy(mc_desc[0].data, req,
7111                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7112                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7113                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7114                 retval = le16_to_cpu(mc_desc[0].retval);
7115
7116                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7117                                                            resp_code,
7118                                                            HCLGE_MAC_VLAN_ADD);
7119         }
7120
7121         if (ret) {
7122                 dev_err(&hdev->pdev->dev,
7123                         "add mac addr failed for cmd_send, ret =%d.\n",
7124                         ret);
7125                 return ret;
7126         }
7127
7128         return cfg_status;
7129 }
7130
7131 static int hclge_init_umv_space(struct hclge_dev *hdev)
7132 {
7133         u16 allocated_size = 0;
7134         int ret;
7135
7136         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7137                                   true);
7138         if (ret)
7139                 return ret;
7140
7141         if (allocated_size < hdev->wanted_umv_size)
7142                 dev_warn(&hdev->pdev->dev,
7143                          "Alloc umv space failed, want %u, get %u\n",
7144                          hdev->wanted_umv_size, allocated_size);
7145
7146         mutex_init(&hdev->umv_mutex);
7147         hdev->max_umv_size = allocated_size;
7148         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7149          * preserve some unicast mac vlan table entries shared by pf
7150          * and its vfs.
7151          */
7152         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7153         hdev->share_umv_size = hdev->priv_umv_size +
7154                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7155
7156         return 0;
7157 }
7158
7159 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7160 {
7161         int ret;
7162
7163         if (hdev->max_umv_size > 0) {
7164                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7165                                           false);
7166                 if (ret)
7167                         return ret;
7168                 hdev->max_umv_size = 0;
7169         }
7170         mutex_destroy(&hdev->umv_mutex);
7171
7172         return 0;
7173 }
7174
7175 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7176                                u16 *allocated_size, bool is_alloc)
7177 {
7178         struct hclge_umv_spc_alc_cmd *req;
7179         struct hclge_desc desc;
7180         int ret;
7181
7182         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7183         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7184         if (!is_alloc)
7185                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7186
7187         req->space_size = cpu_to_le32(space_size);
7188
7189         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7190         if (ret) {
7191                 dev_err(&hdev->pdev->dev,
7192                         "%s umv space failed for cmd_send, ret =%d\n",
7193                         is_alloc ? "allocate" : "free", ret);
7194                 return ret;
7195         }
7196
7197         if (is_alloc && allocated_size)
7198                 *allocated_size = le32_to_cpu(desc.data[1]);
7199
7200         return 0;
7201 }
7202
7203 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7204 {
7205         struct hclge_vport *vport;
7206         int i;
7207
7208         for (i = 0; i < hdev->num_alloc_vport; i++) {
7209                 vport = &hdev->vport[i];
7210                 vport->used_umv_num = 0;
7211         }
7212
7213         mutex_lock(&hdev->umv_mutex);
7214         hdev->share_umv_size = hdev->priv_umv_size +
7215                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7216         mutex_unlock(&hdev->umv_mutex);
7217 }
7218
7219 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7220 {
7221         struct hclge_dev *hdev = vport->back;
7222         bool is_full;
7223
7224         mutex_lock(&hdev->umv_mutex);
7225         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7226                    hdev->share_umv_size == 0);
7227         mutex_unlock(&hdev->umv_mutex);
7228
7229         return is_full;
7230 }
7231
7232 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7233 {
7234         struct hclge_dev *hdev = vport->back;
7235
7236         mutex_lock(&hdev->umv_mutex);
7237         if (is_free) {
7238                 if (vport->used_umv_num > hdev->priv_umv_size)
7239                         hdev->share_umv_size++;
7240
7241                 if (vport->used_umv_num > 0)
7242                         vport->used_umv_num--;
7243         } else {
7244                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7245                     hdev->share_umv_size > 0)
7246                         hdev->share_umv_size--;
7247                 vport->used_umv_num++;
7248         }
7249         mutex_unlock(&hdev->umv_mutex);
7250 }
7251
7252 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7253                              const unsigned char *addr)
7254 {
7255         struct hclge_vport *vport = hclge_get_vport(handle);
7256
7257         return hclge_add_uc_addr_common(vport, addr);
7258 }
7259
7260 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7261                              const unsigned char *addr)
7262 {
7263         struct hclge_dev *hdev = vport->back;
7264         struct hclge_mac_vlan_tbl_entry_cmd req;
7265         struct hclge_desc desc;
7266         u16 egress_port = 0;
7267         int ret;
7268
7269         /* mac addr check */
7270         if (is_zero_ether_addr(addr) ||
7271             is_broadcast_ether_addr(addr) ||
7272             is_multicast_ether_addr(addr)) {
7273                 dev_err(&hdev->pdev->dev,
7274                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7275                          addr, is_zero_ether_addr(addr),
7276                          is_broadcast_ether_addr(addr),
7277                          is_multicast_ether_addr(addr));
7278                 return -EINVAL;
7279         }
7280
7281         memset(&req, 0, sizeof(req));
7282
7283         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7284                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7285
7286         req.egress_port = cpu_to_le16(egress_port);
7287
7288         hclge_prepare_mac_addr(&req, addr, false);
7289
7290         /* Lookup the mac address in the mac_vlan table, and add
7291          * it if the entry is inexistent. Repeated unicast entry
7292          * is not allowed in the mac vlan table.
7293          */
7294         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7295         if (ret == -ENOENT) {
7296                 if (!hclge_is_umv_space_full(vport)) {
7297                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7298                         if (!ret)
7299                                 hclge_update_umv_space(vport, false);
7300                         return ret;
7301                 }
7302
7303                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7304                         hdev->priv_umv_size);
7305
7306                 return -ENOSPC;
7307         }
7308
7309         /* check if we just hit the duplicate */
7310         if (!ret) {
7311                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7312                          vport->vport_id, addr);
7313                 return 0;
7314         }
7315
7316         dev_err(&hdev->pdev->dev,
7317                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7318                 addr);
7319
7320         return ret;
7321 }
7322
7323 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7324                             const unsigned char *addr)
7325 {
7326         struct hclge_vport *vport = hclge_get_vport(handle);
7327
7328         return hclge_rm_uc_addr_common(vport, addr);
7329 }
7330
7331 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7332                             const unsigned char *addr)
7333 {
7334         struct hclge_dev *hdev = vport->back;
7335         struct hclge_mac_vlan_tbl_entry_cmd req;
7336         int ret;
7337
7338         /* mac addr check */
7339         if (is_zero_ether_addr(addr) ||
7340             is_broadcast_ether_addr(addr) ||
7341             is_multicast_ether_addr(addr)) {
7342                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7343                         addr);
7344                 return -EINVAL;
7345         }
7346
7347         memset(&req, 0, sizeof(req));
7348         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7349         hclge_prepare_mac_addr(&req, addr, false);
7350         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7351         if (!ret)
7352                 hclge_update_umv_space(vport, true);
7353
7354         return ret;
7355 }
7356
7357 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7358                              const unsigned char *addr)
7359 {
7360         struct hclge_vport *vport = hclge_get_vport(handle);
7361
7362         return hclge_add_mc_addr_common(vport, addr);
7363 }
7364
7365 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7366                              const unsigned char *addr)
7367 {
7368         struct hclge_dev *hdev = vport->back;
7369         struct hclge_mac_vlan_tbl_entry_cmd req;
7370         struct hclge_desc desc[3];
7371         int status;
7372
7373         /* mac addr check */
7374         if (!is_multicast_ether_addr(addr)) {
7375                 dev_err(&hdev->pdev->dev,
7376                         "Add mc mac err! invalid mac:%pM.\n",
7377                          addr);
7378                 return -EINVAL;
7379         }
7380         memset(&req, 0, sizeof(req));
7381         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7382         hclge_prepare_mac_addr(&req, addr, true);
7383         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7384         if (status) {
7385                 /* This mac addr do not exist, add new entry for it */
7386                 memset(desc[0].data, 0, sizeof(desc[0].data));
7387                 memset(desc[1].data, 0, sizeof(desc[0].data));
7388                 memset(desc[2].data, 0, sizeof(desc[0].data));
7389         }
7390         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7391         if (status)
7392                 return status;
7393         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7394
7395         if (status == -ENOSPC)
7396                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7397
7398         return status;
7399 }
7400
7401 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7402                             const unsigned char *addr)
7403 {
7404         struct hclge_vport *vport = hclge_get_vport(handle);
7405
7406         return hclge_rm_mc_addr_common(vport, addr);
7407 }
7408
7409 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7410                             const unsigned char *addr)
7411 {
7412         struct hclge_dev *hdev = vport->back;
7413         struct hclge_mac_vlan_tbl_entry_cmd req;
7414         enum hclge_cmd_status status;
7415         struct hclge_desc desc[3];
7416
7417         /* mac addr check */
7418         if (!is_multicast_ether_addr(addr)) {
7419                 dev_dbg(&hdev->pdev->dev,
7420                         "Remove mc mac err! invalid mac:%pM.\n",
7421                          addr);
7422                 return -EINVAL;
7423         }
7424
7425         memset(&req, 0, sizeof(req));
7426         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7427         hclge_prepare_mac_addr(&req, addr, true);
7428         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7429         if (!status) {
7430                 /* This mac addr exist, remove this handle's VFID for it */
7431                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7432                 if (status)
7433                         return status;
7434
7435                 if (hclge_is_all_function_id_zero(desc))
7436                         /* All the vfid is zero, so need to delete this entry */
7437                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7438                 else
7439                         /* Not all the vfid is zero, update the vfid */
7440                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7441
7442         } else {
7443                 /* Maybe this mac address is in mta table, but it cannot be
7444                  * deleted here because an entry of mta represents an address
7445                  * range rather than a specific address. the delete action to
7446                  * all entries will take effect in update_mta_status called by
7447                  * hns3_nic_set_rx_mode.
7448                  */
7449                 status = 0;
7450         }
7451
7452         return status;
7453 }
7454
7455 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7456                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7457 {
7458         struct hclge_vport_mac_addr_cfg *mac_cfg;
7459         struct list_head *list;
7460
7461         if (!vport->vport_id)
7462                 return;
7463
7464         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7465         if (!mac_cfg)
7466                 return;
7467
7468         mac_cfg->hd_tbl_status = true;
7469         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7470
7471         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7472                &vport->uc_mac_list : &vport->mc_mac_list;
7473
7474         list_add_tail(&mac_cfg->node, list);
7475 }
7476
7477 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7478                               bool is_write_tbl,
7479                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7480 {
7481         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7482         struct list_head *list;
7483         bool uc_flag, mc_flag;
7484
7485         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7486                &vport->uc_mac_list : &vport->mc_mac_list;
7487
7488         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7489         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7490
7491         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7492                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7493                         if (uc_flag && mac_cfg->hd_tbl_status)
7494                                 hclge_rm_uc_addr_common(vport, mac_addr);
7495
7496                         if (mc_flag && mac_cfg->hd_tbl_status)
7497                                 hclge_rm_mc_addr_common(vport, mac_addr);
7498
7499                         list_del(&mac_cfg->node);
7500                         kfree(mac_cfg);
7501                         break;
7502                 }
7503         }
7504 }
7505
7506 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7507                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7508 {
7509         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7510         struct list_head *list;
7511
7512         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7513                &vport->uc_mac_list : &vport->mc_mac_list;
7514
7515         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7516                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7517                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7518
7519                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7520                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7521
7522                 mac_cfg->hd_tbl_status = false;
7523                 if (is_del_list) {
7524                         list_del(&mac_cfg->node);
7525                         kfree(mac_cfg);
7526                 }
7527         }
7528 }
7529
7530 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7531 {
7532         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7533         struct hclge_vport *vport;
7534         int i;
7535
7536         for (i = 0; i < hdev->num_alloc_vport; i++) {
7537                 vport = &hdev->vport[i];
7538                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7539                         list_del(&mac->node);
7540                         kfree(mac);
7541                 }
7542
7543                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7544                         list_del(&mac->node);
7545                         kfree(mac);
7546                 }
7547         }
7548 }
7549
7550 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7551                                               u16 cmdq_resp, u8 resp_code)
7552 {
7553 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7554 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7555 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7556 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7557
7558         int return_status;
7559
7560         if (cmdq_resp) {
7561                 dev_err(&hdev->pdev->dev,
7562                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7563                         cmdq_resp);
7564                 return -EIO;
7565         }
7566
7567         switch (resp_code) {
7568         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7569         case HCLGE_ETHERTYPE_ALREADY_ADD:
7570                 return_status = 0;
7571                 break;
7572         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7573                 dev_err(&hdev->pdev->dev,
7574                         "add mac ethertype failed for manager table overflow.\n");
7575                 return_status = -EIO;
7576                 break;
7577         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7578                 dev_err(&hdev->pdev->dev,
7579                         "add mac ethertype failed for key conflict.\n");
7580                 return_status = -EIO;
7581                 break;
7582         default:
7583                 dev_err(&hdev->pdev->dev,
7584                         "add mac ethertype failed for undefined, code=%u.\n",
7585                         resp_code);
7586                 return_status = -EIO;
7587         }
7588
7589         return return_status;
7590 }
7591
7592 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7593                                      u8 *mac_addr)
7594 {
7595         struct hclge_mac_vlan_tbl_entry_cmd req;
7596         struct hclge_dev *hdev = vport->back;
7597         struct hclge_desc desc;
7598         u16 egress_port = 0;
7599         int i;
7600
7601         if (is_zero_ether_addr(mac_addr))
7602                 return false;
7603
7604         memset(&req, 0, sizeof(req));
7605         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7606                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7607         req.egress_port = cpu_to_le16(egress_port);
7608         hclge_prepare_mac_addr(&req, mac_addr, false);
7609
7610         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7611                 return true;
7612
7613         vf_idx += HCLGE_VF_VPORT_START_NUM;
7614         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7615                 if (i != vf_idx &&
7616                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7617                         return true;
7618
7619         return false;
7620 }
7621
7622 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7623                             u8 *mac_addr)
7624 {
7625         struct hclge_vport *vport = hclge_get_vport(handle);
7626         struct hclge_dev *hdev = vport->back;
7627
7628         vport = hclge_get_vf_vport(hdev, vf);
7629         if (!vport)
7630                 return -EINVAL;
7631
7632         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7633                 dev_info(&hdev->pdev->dev,
7634                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7635                          mac_addr);
7636                 return 0;
7637         }
7638
7639         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7640                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7641                         mac_addr);
7642                 return -EEXIST;
7643         }
7644
7645         ether_addr_copy(vport->vf_info.mac, mac_addr);
7646         dev_info(&hdev->pdev->dev,
7647                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7648                  vf, mac_addr);
7649
7650         return hclge_inform_reset_assert_to_vf(vport);
7651 }
7652
7653 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7654                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7655 {
7656         struct hclge_desc desc;
7657         u8 resp_code;
7658         u16 retval;
7659         int ret;
7660
7661         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7662         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7663
7664         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7665         if (ret) {
7666                 dev_err(&hdev->pdev->dev,
7667                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7668                         ret);
7669                 return ret;
7670         }
7671
7672         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7673         retval = le16_to_cpu(desc.retval);
7674
7675         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7676 }
7677
7678 static int init_mgr_tbl(struct hclge_dev *hdev)
7679 {
7680         int ret;
7681         int i;
7682
7683         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7684                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7685                 if (ret) {
7686                         dev_err(&hdev->pdev->dev,
7687                                 "add mac ethertype failed, ret =%d.\n",
7688                                 ret);
7689                         return ret;
7690                 }
7691         }
7692
7693         return 0;
7694 }
7695
7696 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7697 {
7698         struct hclge_vport *vport = hclge_get_vport(handle);
7699         struct hclge_dev *hdev = vport->back;
7700
7701         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7702 }
7703
7704 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7705                               bool is_first)
7706 {
7707         const unsigned char *new_addr = (const unsigned char *)p;
7708         struct hclge_vport *vport = hclge_get_vport(handle);
7709         struct hclge_dev *hdev = vport->back;
7710         int ret;
7711
7712         /* mac addr check */
7713         if (is_zero_ether_addr(new_addr) ||
7714             is_broadcast_ether_addr(new_addr) ||
7715             is_multicast_ether_addr(new_addr)) {
7716                 dev_err(&hdev->pdev->dev,
7717                         "Change uc mac err! invalid mac:%pM.\n",
7718                          new_addr);
7719                 return -EINVAL;
7720         }
7721
7722         if ((!is_first || is_kdump_kernel()) &&
7723             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7724                 dev_warn(&hdev->pdev->dev,
7725                          "remove old uc mac address fail.\n");
7726
7727         ret = hclge_add_uc_addr(handle, new_addr);
7728         if (ret) {
7729                 dev_err(&hdev->pdev->dev,
7730                         "add uc mac address fail, ret =%d.\n",
7731                         ret);
7732
7733                 if (!is_first &&
7734                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7735                         dev_err(&hdev->pdev->dev,
7736                                 "restore uc mac address fail.\n");
7737
7738                 return -EIO;
7739         }
7740
7741         ret = hclge_pause_addr_cfg(hdev, new_addr);
7742         if (ret) {
7743                 dev_err(&hdev->pdev->dev,
7744                         "configure mac pause address fail, ret =%d.\n",
7745                         ret);
7746                 return -EIO;
7747         }
7748
7749         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7750
7751         return 0;
7752 }
7753
7754 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7755                           int cmd)
7756 {
7757         struct hclge_vport *vport = hclge_get_vport(handle);
7758         struct hclge_dev *hdev = vport->back;
7759
7760         if (!hdev->hw.mac.phydev)
7761                 return -EOPNOTSUPP;
7762
7763         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7764 }
7765
7766 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7767                                       u8 fe_type, bool filter_en, u8 vf_id)
7768 {
7769         struct hclge_vlan_filter_ctrl_cmd *req;
7770         struct hclge_desc desc;
7771         int ret;
7772
7773         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7774
7775         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7776         req->vlan_type = vlan_type;
7777         req->vlan_fe = filter_en ? fe_type : 0;
7778         req->vf_id = vf_id;
7779
7780         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7781         if (ret)
7782                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7783                         ret);
7784
7785         return ret;
7786 }
7787
7788 #define HCLGE_FILTER_TYPE_VF            0
7789 #define HCLGE_FILTER_TYPE_PORT          1
7790 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7791 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7792 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7793 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7794 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7795 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7796                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7797 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7798                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7799
7800 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7801 {
7802         struct hclge_vport *vport = hclge_get_vport(handle);
7803         struct hclge_dev *hdev = vport->back;
7804
7805         if (hdev->pdev->revision >= 0x21) {
7806                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7807                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7808                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7809                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7810         } else {
7811                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7812                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7813                                            0);
7814         }
7815         if (enable)
7816                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7817         else
7818                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7819 }
7820
7821 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7822                                     bool is_kill, u16 vlan,
7823                                     __be16 proto)
7824 {
7825         struct hclge_vport *vport = &hdev->vport[vfid];
7826         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7827         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7828         struct hclge_desc desc[2];
7829         u8 vf_byte_val;
7830         u8 vf_byte_off;
7831         int ret;
7832
7833         /* if vf vlan table is full, firmware will close vf vlan filter, it
7834          * is unable and unnecessary to add new vlan id to vf vlan filter.
7835          * If spoof check is enable, and vf vlan is full, it shouldn't add
7836          * new vlan, because tx packets with these vlan id will be dropped.
7837          */
7838         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7839                 if (vport->vf_info.spoofchk && vlan) {
7840                         dev_err(&hdev->pdev->dev,
7841                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7842                         return -EPERM;
7843                 }
7844                 return 0;
7845         }
7846
7847         hclge_cmd_setup_basic_desc(&desc[0],
7848                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7849         hclge_cmd_setup_basic_desc(&desc[1],
7850                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7851
7852         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7853
7854         vf_byte_off = vfid / 8;
7855         vf_byte_val = 1 << (vfid % 8);
7856
7857         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7858         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7859
7860         req0->vlan_id  = cpu_to_le16(vlan);
7861         req0->vlan_cfg = is_kill;
7862
7863         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7864                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7865         else
7866                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7867
7868         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7869         if (ret) {
7870                 dev_err(&hdev->pdev->dev,
7871                         "Send vf vlan command fail, ret =%d.\n",
7872                         ret);
7873                 return ret;
7874         }
7875
7876         if (!is_kill) {
7877 #define HCLGE_VF_VLAN_NO_ENTRY  2
7878                 if (!req0->resp_code || req0->resp_code == 1)
7879                         return 0;
7880
7881                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7882                         set_bit(vfid, hdev->vf_vlan_full);
7883                         dev_warn(&hdev->pdev->dev,
7884                                  "vf vlan table is full, vf vlan filter is disabled\n");
7885                         return 0;
7886                 }
7887
7888                 dev_err(&hdev->pdev->dev,
7889                         "Add vf vlan filter fail, ret =%u.\n",
7890                         req0->resp_code);
7891         } else {
7892 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7893                 if (!req0->resp_code)
7894                         return 0;
7895
7896                 /* vf vlan filter is disabled when vf vlan table is full,
7897                  * then new vlan id will not be added into vf vlan table.
7898                  * Just return 0 without warning, avoid massive verbose
7899                  * print logs when unload.
7900                  */
7901                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7902                         return 0;
7903
7904                 dev_err(&hdev->pdev->dev,
7905                         "Kill vf vlan filter fail, ret =%u.\n",
7906                         req0->resp_code);
7907         }
7908
7909         return -EIO;
7910 }
7911
7912 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7913                                       u16 vlan_id, bool is_kill)
7914 {
7915         struct hclge_vlan_filter_pf_cfg_cmd *req;
7916         struct hclge_desc desc;
7917         u8 vlan_offset_byte_val;
7918         u8 vlan_offset_byte;
7919         u8 vlan_offset_160;
7920         int ret;
7921
7922         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7923
7924         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7925         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7926                            HCLGE_VLAN_BYTE_SIZE;
7927         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7928
7929         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7930         req->vlan_offset = vlan_offset_160;
7931         req->vlan_cfg = is_kill;
7932         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7933
7934         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7935         if (ret)
7936                 dev_err(&hdev->pdev->dev,
7937                         "port vlan command, send fail, ret =%d.\n", ret);
7938         return ret;
7939 }
7940
7941 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7942                                     u16 vport_id, u16 vlan_id,
7943                                     bool is_kill)
7944 {
7945         u16 vport_idx, vport_num = 0;
7946         int ret;
7947
7948         if (is_kill && !vlan_id)
7949                 return 0;
7950
7951         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7952                                        proto);
7953         if (ret) {
7954                 dev_err(&hdev->pdev->dev,
7955                         "Set %u vport vlan filter config fail, ret =%d.\n",
7956                         vport_id, ret);
7957                 return ret;
7958         }
7959
7960         /* vlan 0 may be added twice when 8021q module is enabled */
7961         if (!is_kill && !vlan_id &&
7962             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7963                 return 0;
7964
7965         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7966                 dev_err(&hdev->pdev->dev,
7967                         "Add port vlan failed, vport %u is already in vlan %u\n",
7968                         vport_id, vlan_id);
7969                 return -EINVAL;
7970         }
7971
7972         if (is_kill &&
7973             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7974                 dev_err(&hdev->pdev->dev,
7975                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7976                         vport_id, vlan_id);
7977                 return -EINVAL;
7978         }
7979
7980         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7981                 vport_num++;
7982
7983         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7984                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7985                                                  is_kill);
7986
7987         return ret;
7988 }
7989
7990 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7991 {
7992         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7993         struct hclge_vport_vtag_tx_cfg_cmd *req;
7994         struct hclge_dev *hdev = vport->back;
7995         struct hclge_desc desc;
7996         u16 bmap_index;
7997         int status;
7998
7999         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8000
8001         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8002         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8003         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8004         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8005                       vcfg->accept_tag1 ? 1 : 0);
8006         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8007                       vcfg->accept_untag1 ? 1 : 0);
8008         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8009                       vcfg->accept_tag2 ? 1 : 0);
8010         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8011                       vcfg->accept_untag2 ? 1 : 0);
8012         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8013                       vcfg->insert_tag1_en ? 1 : 0);
8014         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8015                       vcfg->insert_tag2_en ? 1 : 0);
8016         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8017
8018         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8019         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8020                         HCLGE_VF_NUM_PER_BYTE;
8021         req->vf_bitmap[bmap_index] =
8022                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8023
8024         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8025         if (status)
8026                 dev_err(&hdev->pdev->dev,
8027                         "Send port txvlan cfg command fail, ret =%d\n",
8028                         status);
8029
8030         return status;
8031 }
8032
8033 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8034 {
8035         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8036         struct hclge_vport_vtag_rx_cfg_cmd *req;
8037         struct hclge_dev *hdev = vport->back;
8038         struct hclge_desc desc;
8039         u16 bmap_index;
8040         int status;
8041
8042         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8043
8044         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8045         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8046                       vcfg->strip_tag1_en ? 1 : 0);
8047         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8048                       vcfg->strip_tag2_en ? 1 : 0);
8049         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8050                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8051         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8052                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8053
8054         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8055         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8056                         HCLGE_VF_NUM_PER_BYTE;
8057         req->vf_bitmap[bmap_index] =
8058                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8059
8060         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8061         if (status)
8062                 dev_err(&hdev->pdev->dev,
8063                         "Send port rxvlan cfg command fail, ret =%d\n",
8064                         status);
8065
8066         return status;
8067 }
8068
8069 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8070                                   u16 port_base_vlan_state,
8071                                   u16 vlan_tag)
8072 {
8073         int ret;
8074
8075         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8076                 vport->txvlan_cfg.accept_tag1 = true;
8077                 vport->txvlan_cfg.insert_tag1_en = false;
8078                 vport->txvlan_cfg.default_tag1 = 0;
8079         } else {
8080                 vport->txvlan_cfg.accept_tag1 = false;
8081                 vport->txvlan_cfg.insert_tag1_en = true;
8082                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8083         }
8084
8085         vport->txvlan_cfg.accept_untag1 = true;
8086
8087         /* accept_tag2 and accept_untag2 are not supported on
8088          * pdev revision(0x20), new revision support them,
8089          * this two fields can not be configured by user.
8090          */
8091         vport->txvlan_cfg.accept_tag2 = true;
8092         vport->txvlan_cfg.accept_untag2 = true;
8093         vport->txvlan_cfg.insert_tag2_en = false;
8094         vport->txvlan_cfg.default_tag2 = 0;
8095
8096         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8097                 vport->rxvlan_cfg.strip_tag1_en = false;
8098                 vport->rxvlan_cfg.strip_tag2_en =
8099                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8100         } else {
8101                 vport->rxvlan_cfg.strip_tag1_en =
8102                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8103                 vport->rxvlan_cfg.strip_tag2_en = true;
8104         }
8105         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8106         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8107
8108         ret = hclge_set_vlan_tx_offload_cfg(vport);
8109         if (ret)
8110                 return ret;
8111
8112         return hclge_set_vlan_rx_offload_cfg(vport);
8113 }
8114
8115 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8116 {
8117         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8118         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8119         struct hclge_desc desc;
8120         int status;
8121
8122         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8123         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8124         rx_req->ot_fst_vlan_type =
8125                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8126         rx_req->ot_sec_vlan_type =
8127                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8128         rx_req->in_fst_vlan_type =
8129                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8130         rx_req->in_sec_vlan_type =
8131                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8132
8133         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8134         if (status) {
8135                 dev_err(&hdev->pdev->dev,
8136                         "Send rxvlan protocol type command fail, ret =%d\n",
8137                         status);
8138                 return status;
8139         }
8140
8141         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8142
8143         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8144         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8145         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8146
8147         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8148         if (status)
8149                 dev_err(&hdev->pdev->dev,
8150                         "Send txvlan protocol type command fail, ret =%d\n",
8151                         status);
8152
8153         return status;
8154 }
8155
8156 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8157 {
8158 #define HCLGE_DEF_VLAN_TYPE             0x8100
8159
8160         struct hnae3_handle *handle = &hdev->vport[0].nic;
8161         struct hclge_vport *vport;
8162         int ret;
8163         int i;
8164
8165         if (hdev->pdev->revision >= 0x21) {
8166                 /* for revision 0x21, vf vlan filter is per function */
8167                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8168                         vport = &hdev->vport[i];
8169                         ret = hclge_set_vlan_filter_ctrl(hdev,
8170                                                          HCLGE_FILTER_TYPE_VF,
8171                                                          HCLGE_FILTER_FE_EGRESS,
8172                                                          true,
8173                                                          vport->vport_id);
8174                         if (ret)
8175                                 return ret;
8176                 }
8177
8178                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8179                                                  HCLGE_FILTER_FE_INGRESS, true,
8180                                                  0);
8181                 if (ret)
8182                         return ret;
8183         } else {
8184                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8185                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8186                                                  true, 0);
8187                 if (ret)
8188                         return ret;
8189         }
8190
8191         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8192
8193         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8194         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8195         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8196         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8197         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8198         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8199
8200         ret = hclge_set_vlan_protocol_type(hdev);
8201         if (ret)
8202                 return ret;
8203
8204         for (i = 0; i < hdev->num_alloc_vport; i++) {
8205                 u16 vlan_tag;
8206
8207                 vport = &hdev->vport[i];
8208                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8209
8210                 ret = hclge_vlan_offload_cfg(vport,
8211                                              vport->port_base_vlan_cfg.state,
8212                                              vlan_tag);
8213                 if (ret)
8214                         return ret;
8215         }
8216
8217         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8218 }
8219
8220 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8221                                        bool writen_to_tbl)
8222 {
8223         struct hclge_vport_vlan_cfg *vlan;
8224
8225         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8226         if (!vlan)
8227                 return;
8228
8229         vlan->hd_tbl_status = writen_to_tbl;
8230         vlan->vlan_id = vlan_id;
8231
8232         list_add_tail(&vlan->node, &vport->vlan_list);
8233 }
8234
8235 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8236 {
8237         struct hclge_vport_vlan_cfg *vlan, *tmp;
8238         struct hclge_dev *hdev = vport->back;
8239         int ret;
8240
8241         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8242                 if (!vlan->hd_tbl_status) {
8243                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8244                                                        vport->vport_id,
8245                                                        vlan->vlan_id, false);
8246                         if (ret) {
8247                                 dev_err(&hdev->pdev->dev,
8248                                         "restore vport vlan list failed, ret=%d\n",
8249                                         ret);
8250                                 return ret;
8251                         }
8252                 }
8253                 vlan->hd_tbl_status = true;
8254         }
8255
8256         return 0;
8257 }
8258
8259 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8260                                       bool is_write_tbl)
8261 {
8262         struct hclge_vport_vlan_cfg *vlan, *tmp;
8263         struct hclge_dev *hdev = vport->back;
8264
8265         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8266                 if (vlan->vlan_id == vlan_id) {
8267                         if (is_write_tbl && vlan->hd_tbl_status)
8268                                 hclge_set_vlan_filter_hw(hdev,
8269                                                          htons(ETH_P_8021Q),
8270                                                          vport->vport_id,
8271                                                          vlan_id,
8272                                                          true);
8273
8274                         list_del(&vlan->node);
8275                         kfree(vlan);
8276                         break;
8277                 }
8278         }
8279 }
8280
8281 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8282 {
8283         struct hclge_vport_vlan_cfg *vlan, *tmp;
8284         struct hclge_dev *hdev = vport->back;
8285
8286         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8287                 if (vlan->hd_tbl_status)
8288                         hclge_set_vlan_filter_hw(hdev,
8289                                                  htons(ETH_P_8021Q),
8290                                                  vport->vport_id,
8291                                                  vlan->vlan_id,
8292                                                  true);
8293
8294                 vlan->hd_tbl_status = false;
8295                 if (is_del_list) {
8296                         list_del(&vlan->node);
8297                         kfree(vlan);
8298                 }
8299         }
8300 }
8301
8302 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8303 {
8304         struct hclge_vport_vlan_cfg *vlan, *tmp;
8305         struct hclge_vport *vport;
8306         int i;
8307
8308         for (i = 0; i < hdev->num_alloc_vport; i++) {
8309                 vport = &hdev->vport[i];
8310                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8311                         list_del(&vlan->node);
8312                         kfree(vlan);
8313                 }
8314         }
8315 }
8316
8317 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8318 {
8319         struct hclge_vport *vport = hclge_get_vport(handle);
8320         struct hclge_vport_vlan_cfg *vlan, *tmp;
8321         struct hclge_dev *hdev = vport->back;
8322         u16 vlan_proto;
8323         u16 state, vlan_id;
8324         int i;
8325
8326         for (i = 0; i < hdev->num_alloc_vport; i++) {
8327                 vport = &hdev->vport[i];
8328                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8329                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8330                 state = vport->port_base_vlan_cfg.state;
8331
8332                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8333                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8334                                                  vport->vport_id, vlan_id,
8335                                                  false);
8336                         continue;
8337                 }
8338
8339                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8340                         int ret;
8341
8342                         if (!vlan->hd_tbl_status)
8343                                 continue;
8344                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8345                                                        vport->vport_id,
8346                                                        vlan->vlan_id, false);
8347                         if (ret)
8348                                 break;
8349                 }
8350         }
8351 }
8352
8353 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8354 {
8355         struct hclge_vport *vport = hclge_get_vport(handle);
8356
8357         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8358                 vport->rxvlan_cfg.strip_tag1_en = false;
8359                 vport->rxvlan_cfg.strip_tag2_en = enable;
8360         } else {
8361                 vport->rxvlan_cfg.strip_tag1_en = enable;
8362                 vport->rxvlan_cfg.strip_tag2_en = true;
8363         }
8364         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8365         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8366         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8367
8368         return hclge_set_vlan_rx_offload_cfg(vport);
8369 }
8370
8371 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8372                                             u16 port_base_vlan_state,
8373                                             struct hclge_vlan_info *new_info,
8374                                             struct hclge_vlan_info *old_info)
8375 {
8376         struct hclge_dev *hdev = vport->back;
8377         int ret;
8378
8379         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8380                 hclge_rm_vport_all_vlan_table(vport, false);
8381                 return hclge_set_vlan_filter_hw(hdev,
8382                                                  htons(new_info->vlan_proto),
8383                                                  vport->vport_id,
8384                                                  new_info->vlan_tag,
8385                                                  false);
8386         }
8387
8388         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8389                                        vport->vport_id, old_info->vlan_tag,
8390                                        true);
8391         if (ret)
8392                 return ret;
8393
8394         return hclge_add_vport_all_vlan_table(vport);
8395 }
8396
8397 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8398                                     struct hclge_vlan_info *vlan_info)
8399 {
8400         struct hnae3_handle *nic = &vport->nic;
8401         struct hclge_vlan_info *old_vlan_info;
8402         struct hclge_dev *hdev = vport->back;
8403         int ret;
8404
8405         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8406
8407         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8408         if (ret)
8409                 return ret;
8410
8411         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8412                 /* add new VLAN tag */
8413                 ret = hclge_set_vlan_filter_hw(hdev,
8414                                                htons(vlan_info->vlan_proto),
8415                                                vport->vport_id,
8416                                                vlan_info->vlan_tag,
8417                                                false);
8418                 if (ret)
8419                         return ret;
8420
8421                 /* remove old VLAN tag */
8422                 ret = hclge_set_vlan_filter_hw(hdev,
8423                                                htons(old_vlan_info->vlan_proto),
8424                                                vport->vport_id,
8425                                                old_vlan_info->vlan_tag,
8426                                                true);
8427                 if (ret)
8428                         return ret;
8429
8430                 goto update;
8431         }
8432
8433         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8434                                                old_vlan_info);
8435         if (ret)
8436                 return ret;
8437
8438         /* update state only when disable/enable port based VLAN */
8439         vport->port_base_vlan_cfg.state = state;
8440         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8441                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8442         else
8443                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8444
8445 update:
8446         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8447         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8448         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8449
8450         return 0;
8451 }
8452
8453 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8454                                           enum hnae3_port_base_vlan_state state,
8455                                           u16 vlan)
8456 {
8457         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8458                 if (!vlan)
8459                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8460                 else
8461                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8462         } else {
8463                 if (!vlan)
8464                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8465                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8466                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8467                 else
8468                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8469         }
8470 }
8471
8472 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8473                                     u16 vlan, u8 qos, __be16 proto)
8474 {
8475         struct hclge_vport *vport = hclge_get_vport(handle);
8476         struct hclge_dev *hdev = vport->back;
8477         struct hclge_vlan_info vlan_info;
8478         u16 state;
8479         int ret;
8480
8481         if (hdev->pdev->revision == 0x20)
8482                 return -EOPNOTSUPP;
8483
8484         vport = hclge_get_vf_vport(hdev, vfid);
8485         if (!vport)
8486                 return -EINVAL;
8487
8488         /* qos is a 3 bits value, so can not be bigger than 7 */
8489         if (vlan > VLAN_N_VID - 1 || qos > 7)
8490                 return -EINVAL;
8491         if (proto != htons(ETH_P_8021Q))
8492                 return -EPROTONOSUPPORT;
8493
8494         state = hclge_get_port_base_vlan_state(vport,
8495                                                vport->port_base_vlan_cfg.state,
8496                                                vlan);
8497         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8498                 return 0;
8499
8500         vlan_info.vlan_tag = vlan;
8501         vlan_info.qos = qos;
8502         vlan_info.vlan_proto = ntohs(proto);
8503
8504         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8505                 return hclge_update_port_base_vlan_cfg(vport, state,
8506                                                        &vlan_info);
8507         } else {
8508                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8509                                                         vport->vport_id, state,
8510                                                         vlan, qos,
8511                                                         ntohs(proto));
8512                 return ret;
8513         }
8514 }
8515
8516 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8517                           u16 vlan_id, bool is_kill)
8518 {
8519         struct hclge_vport *vport = hclge_get_vport(handle);
8520         struct hclge_dev *hdev = vport->back;
8521         bool writen_to_tbl = false;
8522         int ret = 0;
8523
8524         /* When device is resetting, firmware is unable to handle
8525          * mailbox. Just record the vlan id, and remove it after
8526          * reset finished.
8527          */
8528         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8529                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8530                 return -EBUSY;
8531         }
8532
8533         /* when port base vlan enabled, we use port base vlan as the vlan
8534          * filter entry. In this case, we don't update vlan filter table
8535          * when user add new vlan or remove exist vlan, just update the vport
8536          * vlan list. The vlan id in vlan list will be writen in vlan filter
8537          * table until port base vlan disabled
8538          */
8539         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8540                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8541                                                vlan_id, is_kill);
8542                 writen_to_tbl = true;
8543         }
8544
8545         if (!ret) {
8546                 if (is_kill)
8547                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8548                 else
8549                         hclge_add_vport_vlan_table(vport, vlan_id,
8550                                                    writen_to_tbl);
8551         } else if (is_kill) {
8552                 /* when remove hw vlan filter failed, record the vlan id,
8553                  * and try to remove it from hw later, to be consistence
8554                  * with stack
8555                  */
8556                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8557         }
8558         return ret;
8559 }
8560
8561 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8562 {
8563 #define HCLGE_MAX_SYNC_COUNT    60
8564
8565         int i, ret, sync_cnt = 0;
8566         u16 vlan_id;
8567
8568         /* start from vport 1 for PF is always alive */
8569         for (i = 0; i < hdev->num_alloc_vport; i++) {
8570                 struct hclge_vport *vport = &hdev->vport[i];
8571
8572                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8573                                          VLAN_N_VID);
8574                 while (vlan_id != VLAN_N_VID) {
8575                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8576                                                        vport->vport_id, vlan_id,
8577                                                        true);
8578                         if (ret && ret != -EINVAL)
8579                                 return;
8580
8581                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8582                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8583
8584                         sync_cnt++;
8585                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8586                                 return;
8587
8588                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8589                                                  VLAN_N_VID);
8590                 }
8591         }
8592 }
8593
8594 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8595 {
8596         struct hclge_config_max_frm_size_cmd *req;
8597         struct hclge_desc desc;
8598
8599         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8600
8601         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8602         req->max_frm_size = cpu_to_le16(new_mps);
8603         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8604
8605         return hclge_cmd_send(&hdev->hw, &desc, 1);
8606 }
8607
8608 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8609 {
8610         struct hclge_vport *vport = hclge_get_vport(handle);
8611
8612         return hclge_set_vport_mtu(vport, new_mtu);
8613 }
8614
8615 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8616 {
8617         struct hclge_dev *hdev = vport->back;
8618         int i, max_frm_size, ret;
8619
8620         /* HW supprt 2 layer vlan */
8621         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8622         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8623             max_frm_size > HCLGE_MAC_MAX_FRAME)
8624                 return -EINVAL;
8625
8626         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8627         mutex_lock(&hdev->vport_lock);
8628         /* VF's mps must fit within hdev->mps */
8629         if (vport->vport_id && max_frm_size > hdev->mps) {
8630                 mutex_unlock(&hdev->vport_lock);
8631                 return -EINVAL;
8632         } else if (vport->vport_id) {
8633                 vport->mps = max_frm_size;
8634                 mutex_unlock(&hdev->vport_lock);
8635                 return 0;
8636         }
8637
8638         /* PF's mps must be greater then VF's mps */
8639         for (i = 1; i < hdev->num_alloc_vport; i++)
8640                 if (max_frm_size < hdev->vport[i].mps) {
8641                         mutex_unlock(&hdev->vport_lock);
8642                         return -EINVAL;
8643                 }
8644
8645         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8646
8647         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8648         if (ret) {
8649                 dev_err(&hdev->pdev->dev,
8650                         "Change mtu fail, ret =%d\n", ret);
8651                 goto out;
8652         }
8653
8654         hdev->mps = max_frm_size;
8655         vport->mps = max_frm_size;
8656
8657         ret = hclge_buffer_alloc(hdev);
8658         if (ret)
8659                 dev_err(&hdev->pdev->dev,
8660                         "Allocate buffer fail, ret =%d\n", ret);
8661
8662 out:
8663         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8664         mutex_unlock(&hdev->vport_lock);
8665         return ret;
8666 }
8667
8668 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8669                                     bool enable)
8670 {
8671         struct hclge_reset_tqp_queue_cmd *req;
8672         struct hclge_desc desc;
8673         int ret;
8674
8675         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8676
8677         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8678         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8679         if (enable)
8680                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8681
8682         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8683         if (ret) {
8684                 dev_err(&hdev->pdev->dev,
8685                         "Send tqp reset cmd error, status =%d\n", ret);
8686                 return ret;
8687         }
8688
8689         return 0;
8690 }
8691
8692 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8693 {
8694         struct hclge_reset_tqp_queue_cmd *req;
8695         struct hclge_desc desc;
8696         int ret;
8697
8698         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8699
8700         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8701         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8702
8703         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8704         if (ret) {
8705                 dev_err(&hdev->pdev->dev,
8706                         "Get reset status error, status =%d\n", ret);
8707                 return ret;
8708         }
8709
8710         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8711 }
8712
8713 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8714 {
8715         struct hnae3_queue *queue;
8716         struct hclge_tqp *tqp;
8717
8718         queue = handle->kinfo.tqp[queue_id];
8719         tqp = container_of(queue, struct hclge_tqp, q);
8720
8721         return tqp->index;
8722 }
8723
8724 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8725 {
8726         struct hclge_vport *vport = hclge_get_vport(handle);
8727         struct hclge_dev *hdev = vport->back;
8728         int reset_try_times = 0;
8729         int reset_status;
8730         u16 queue_gid;
8731         int ret;
8732
8733         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8734
8735         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8736         if (ret) {
8737                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8738                 return ret;
8739         }
8740
8741         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8742         if (ret) {
8743                 dev_err(&hdev->pdev->dev,
8744                         "Send reset tqp cmd fail, ret = %d\n", ret);
8745                 return ret;
8746         }
8747
8748         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8749                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8750                 if (reset_status)
8751                         break;
8752
8753                 /* Wait for tqp hw reset */
8754                 usleep_range(1000, 1200);
8755         }
8756
8757         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8758                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8759                 return ret;
8760         }
8761
8762         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8763         if (ret)
8764                 dev_err(&hdev->pdev->dev,
8765                         "Deassert the soft reset fail, ret = %d\n", ret);
8766
8767         return ret;
8768 }
8769
8770 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8771 {
8772         struct hclge_dev *hdev = vport->back;
8773         int reset_try_times = 0;
8774         int reset_status;
8775         u16 queue_gid;
8776         int ret;
8777
8778         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8779
8780         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8781         if (ret) {
8782                 dev_warn(&hdev->pdev->dev,
8783                          "Send reset tqp cmd fail, ret = %d\n", ret);
8784                 return;
8785         }
8786
8787         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8788                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8789                 if (reset_status)
8790                         break;
8791
8792                 /* Wait for tqp hw reset */
8793                 usleep_range(1000, 1200);
8794         }
8795
8796         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8797                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8798                 return;
8799         }
8800
8801         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8802         if (ret)
8803                 dev_warn(&hdev->pdev->dev,
8804                          "Deassert the soft reset fail, ret = %d\n", ret);
8805 }
8806
8807 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8808 {
8809         struct hclge_vport *vport = hclge_get_vport(handle);
8810         struct hclge_dev *hdev = vport->back;
8811
8812         return hdev->fw_version;
8813 }
8814
8815 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8816 {
8817         struct phy_device *phydev = hdev->hw.mac.phydev;
8818
8819         if (!phydev)
8820                 return;
8821
8822         phy_set_asym_pause(phydev, rx_en, tx_en);
8823 }
8824
8825 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8826 {
8827         int ret;
8828
8829         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8830                 return 0;
8831
8832         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8833         if (ret)
8834                 dev_err(&hdev->pdev->dev,
8835                         "configure pauseparam error, ret = %d.\n", ret);
8836
8837         return ret;
8838 }
8839
8840 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8841 {
8842         struct phy_device *phydev = hdev->hw.mac.phydev;
8843         u16 remote_advertising = 0;
8844         u16 local_advertising;
8845         u32 rx_pause, tx_pause;
8846         u8 flowctl;
8847
8848         if (!phydev->link || !phydev->autoneg)
8849                 return 0;
8850
8851         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8852
8853         if (phydev->pause)
8854                 remote_advertising = LPA_PAUSE_CAP;
8855
8856         if (phydev->asym_pause)
8857                 remote_advertising |= LPA_PAUSE_ASYM;
8858
8859         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8860                                            remote_advertising);
8861         tx_pause = flowctl & FLOW_CTRL_TX;
8862         rx_pause = flowctl & FLOW_CTRL_RX;
8863
8864         if (phydev->duplex == HCLGE_MAC_HALF) {
8865                 tx_pause = 0;
8866                 rx_pause = 0;
8867         }
8868
8869         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8870 }
8871
8872 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8873                                  u32 *rx_en, u32 *tx_en)
8874 {
8875         struct hclge_vport *vport = hclge_get_vport(handle);
8876         struct hclge_dev *hdev = vport->back;
8877         struct phy_device *phydev = hdev->hw.mac.phydev;
8878
8879         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8880
8881         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8882                 *rx_en = 0;
8883                 *tx_en = 0;
8884                 return;
8885         }
8886
8887         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8888                 *rx_en = 1;
8889                 *tx_en = 0;
8890         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8891                 *tx_en = 1;
8892                 *rx_en = 0;
8893         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8894                 *rx_en = 1;
8895                 *tx_en = 1;
8896         } else {
8897                 *rx_en = 0;
8898                 *tx_en = 0;
8899         }
8900 }
8901
8902 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8903                                          u32 rx_en, u32 tx_en)
8904 {
8905         if (rx_en && tx_en)
8906                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8907         else if (rx_en && !tx_en)
8908                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8909         else if (!rx_en && tx_en)
8910                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8911         else
8912                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8913
8914         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8915 }
8916
8917 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8918                                 u32 rx_en, u32 tx_en)
8919 {
8920         struct hclge_vport *vport = hclge_get_vport(handle);
8921         struct hclge_dev *hdev = vport->back;
8922         struct phy_device *phydev = hdev->hw.mac.phydev;
8923         u32 fc_autoneg;
8924
8925         if (phydev) {
8926                 fc_autoneg = hclge_get_autoneg(handle);
8927                 if (auto_neg != fc_autoneg) {
8928                         dev_info(&hdev->pdev->dev,
8929                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8930                         return -EOPNOTSUPP;
8931                 }
8932         }
8933
8934         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8935                 dev_info(&hdev->pdev->dev,
8936                          "Priority flow control enabled. Cannot set link flow control.\n");
8937                 return -EOPNOTSUPP;
8938         }
8939
8940         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8941
8942         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8943
8944         if (!auto_neg)
8945                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8946
8947         if (phydev)
8948                 return phy_start_aneg(phydev);
8949
8950         return -EOPNOTSUPP;
8951 }
8952
8953 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8954                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8955 {
8956         struct hclge_vport *vport = hclge_get_vport(handle);
8957         struct hclge_dev *hdev = vport->back;
8958
8959         if (speed)
8960                 *speed = hdev->hw.mac.speed;
8961         if (duplex)
8962                 *duplex = hdev->hw.mac.duplex;
8963         if (auto_neg)
8964                 *auto_neg = hdev->hw.mac.autoneg;
8965 }
8966
8967 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8968                                  u8 *module_type)
8969 {
8970         struct hclge_vport *vport = hclge_get_vport(handle);
8971         struct hclge_dev *hdev = vport->back;
8972
8973         if (media_type)
8974                 *media_type = hdev->hw.mac.media_type;
8975
8976         if (module_type)
8977                 *module_type = hdev->hw.mac.module_type;
8978 }
8979
8980 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8981                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8982 {
8983         struct hclge_vport *vport = hclge_get_vport(handle);
8984         struct hclge_dev *hdev = vport->back;
8985         struct phy_device *phydev = hdev->hw.mac.phydev;
8986         int mdix_ctrl, mdix, is_resolved;
8987         unsigned int retval;
8988
8989         if (!phydev) {
8990                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8991                 *tp_mdix = ETH_TP_MDI_INVALID;
8992                 return;
8993         }
8994
8995         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8996
8997         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8998         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8999                                     HCLGE_PHY_MDIX_CTRL_S);
9000
9001         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9002         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9003         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9004
9005         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9006
9007         switch (mdix_ctrl) {
9008         case 0x0:
9009                 *tp_mdix_ctrl = ETH_TP_MDI;
9010                 break;
9011         case 0x1:
9012                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9013                 break;
9014         case 0x3:
9015                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9016                 break;
9017         default:
9018                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9019                 break;
9020         }
9021
9022         if (!is_resolved)
9023                 *tp_mdix = ETH_TP_MDI_INVALID;
9024         else if (mdix)
9025                 *tp_mdix = ETH_TP_MDI_X;
9026         else
9027                 *tp_mdix = ETH_TP_MDI;
9028 }
9029
9030 static void hclge_info_show(struct hclge_dev *hdev)
9031 {
9032         struct device *dev = &hdev->pdev->dev;
9033
9034         dev_info(dev, "PF info begin:\n");
9035
9036         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9037         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9038         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9039         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9040         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9041         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9042         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9043         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9044         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9045         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9046         dev_info(dev, "This is %s PF\n",
9047                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9048         dev_info(dev, "DCB %s\n",
9049                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9050         dev_info(dev, "MQPRIO %s\n",
9051                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9052
9053         dev_info(dev, "PF info end.\n");
9054 }
9055
9056 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9057                                           struct hclge_vport *vport)
9058 {
9059         struct hnae3_client *client = vport->nic.client;
9060         struct hclge_dev *hdev = ae_dev->priv;
9061         int rst_cnt = hdev->rst_stats.reset_cnt;
9062         int ret;
9063
9064         ret = client->ops->init_instance(&vport->nic);
9065         if (ret)
9066                 return ret;
9067
9068         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9069         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9070             rst_cnt != hdev->rst_stats.reset_cnt) {
9071                 ret = -EBUSY;
9072                 goto init_nic_err;
9073         }
9074
9075         /* Enable nic hw error interrupts */
9076         ret = hclge_config_nic_hw_error(hdev, true);
9077         if (ret) {
9078                 dev_err(&ae_dev->pdev->dev,
9079                         "fail(%d) to enable hw error interrupts\n", ret);
9080                 goto init_nic_err;
9081         }
9082
9083         hnae3_set_client_init_flag(client, ae_dev, 1);
9084
9085         if (netif_msg_drv(&hdev->vport->nic))
9086                 hclge_info_show(hdev);
9087
9088         return ret;
9089
9090 init_nic_err:
9091         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9092         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9093                 msleep(HCLGE_WAIT_RESET_DONE);
9094
9095         client->ops->uninit_instance(&vport->nic, 0);
9096
9097         return ret;
9098 }
9099
9100 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9101                                            struct hclge_vport *vport)
9102 {
9103         struct hnae3_client *client = vport->roce.client;
9104         struct hclge_dev *hdev = ae_dev->priv;
9105         int rst_cnt;
9106         int ret;
9107
9108         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9109             !hdev->nic_client)
9110                 return 0;
9111
9112         client = hdev->roce_client;
9113         ret = hclge_init_roce_base_info(vport);
9114         if (ret)
9115                 return ret;
9116
9117         rst_cnt = hdev->rst_stats.reset_cnt;
9118         ret = client->ops->init_instance(&vport->roce);
9119         if (ret)
9120                 return ret;
9121
9122         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9123         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9124             rst_cnt != hdev->rst_stats.reset_cnt) {
9125                 ret = -EBUSY;
9126                 goto init_roce_err;
9127         }
9128
9129         /* Enable roce ras interrupts */
9130         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9131         if (ret) {
9132                 dev_err(&ae_dev->pdev->dev,
9133                         "fail(%d) to enable roce ras interrupts\n", ret);
9134                 goto init_roce_err;
9135         }
9136
9137         hnae3_set_client_init_flag(client, ae_dev, 1);
9138
9139         return 0;
9140
9141 init_roce_err:
9142         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9143         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9144                 msleep(HCLGE_WAIT_RESET_DONE);
9145
9146         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9147
9148         return ret;
9149 }
9150
9151 static int hclge_init_client_instance(struct hnae3_client *client,
9152                                       struct hnae3_ae_dev *ae_dev)
9153 {
9154         struct hclge_dev *hdev = ae_dev->priv;
9155         struct hclge_vport *vport;
9156         int i, ret;
9157
9158         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9159                 vport = &hdev->vport[i];
9160
9161                 switch (client->type) {
9162                 case HNAE3_CLIENT_KNIC:
9163                         hdev->nic_client = client;
9164                         vport->nic.client = client;
9165                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9166                         if (ret)
9167                                 goto clear_nic;
9168
9169                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9170                         if (ret)
9171                                 goto clear_roce;
9172
9173                         break;
9174                 case HNAE3_CLIENT_ROCE:
9175                         if (hnae3_dev_roce_supported(hdev)) {
9176                                 hdev->roce_client = client;
9177                                 vport->roce.client = client;
9178                         }
9179
9180                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9181                         if (ret)
9182                                 goto clear_roce;
9183
9184                         break;
9185                 default:
9186                         return -EINVAL;
9187                 }
9188         }
9189
9190         return 0;
9191
9192 clear_nic:
9193         hdev->nic_client = NULL;
9194         vport->nic.client = NULL;
9195         return ret;
9196 clear_roce:
9197         hdev->roce_client = NULL;
9198         vport->roce.client = NULL;
9199         return ret;
9200 }
9201
9202 static void hclge_uninit_client_instance(struct hnae3_client *client,
9203                                          struct hnae3_ae_dev *ae_dev)
9204 {
9205         struct hclge_dev *hdev = ae_dev->priv;
9206         struct hclge_vport *vport;
9207         int i;
9208
9209         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9210                 vport = &hdev->vport[i];
9211                 if (hdev->roce_client) {
9212                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9213                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9214                                 msleep(HCLGE_WAIT_RESET_DONE);
9215
9216                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9217                                                                 0);
9218                         hdev->roce_client = NULL;
9219                         vport->roce.client = NULL;
9220                 }
9221                 if (client->type == HNAE3_CLIENT_ROCE)
9222                         return;
9223                 if (hdev->nic_client && client->ops->uninit_instance) {
9224                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9225                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9226                                 msleep(HCLGE_WAIT_RESET_DONE);
9227
9228                         client->ops->uninit_instance(&vport->nic, 0);
9229                         hdev->nic_client = NULL;
9230                         vport->nic.client = NULL;
9231                 }
9232         }
9233 }
9234
9235 static int hclge_pci_init(struct hclge_dev *hdev)
9236 {
9237         struct pci_dev *pdev = hdev->pdev;
9238         struct hclge_hw *hw;
9239         int ret;
9240
9241         ret = pci_enable_device(pdev);
9242         if (ret) {
9243                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9244                 return ret;
9245         }
9246
9247         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9248         if (ret) {
9249                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9250                 if (ret) {
9251                         dev_err(&pdev->dev,
9252                                 "can't set consistent PCI DMA");
9253                         goto err_disable_device;
9254                 }
9255                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9256         }
9257
9258         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9259         if (ret) {
9260                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9261                 goto err_disable_device;
9262         }
9263
9264         pci_set_master(pdev);
9265         hw = &hdev->hw;
9266         hw->io_base = pcim_iomap(pdev, 2, 0);
9267         if (!hw->io_base) {
9268                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9269                 ret = -ENOMEM;
9270                 goto err_clr_master;
9271         }
9272
9273         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9274
9275         return 0;
9276 err_clr_master:
9277         pci_clear_master(pdev);
9278         pci_release_regions(pdev);
9279 err_disable_device:
9280         pci_disable_device(pdev);
9281
9282         return ret;
9283 }
9284
9285 static void hclge_pci_uninit(struct hclge_dev *hdev)
9286 {
9287         struct pci_dev *pdev = hdev->pdev;
9288
9289         pcim_iounmap(pdev, hdev->hw.io_base);
9290         pci_free_irq_vectors(pdev);
9291         pci_clear_master(pdev);
9292         pci_release_mem_regions(pdev);
9293         pci_disable_device(pdev);
9294 }
9295
9296 static void hclge_state_init(struct hclge_dev *hdev)
9297 {
9298         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9299         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9300         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9301         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9302         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9303         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9304         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9305 }
9306
9307 static void hclge_state_uninit(struct hclge_dev *hdev)
9308 {
9309         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9310         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9311
9312         if (hdev->reset_timer.function)
9313                 del_timer_sync(&hdev->reset_timer);
9314         if (hdev->service_task.work.func)
9315                 cancel_delayed_work_sync(&hdev->service_task);
9316 }
9317
9318 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9319 {
9320 #define HCLGE_FLR_RETRY_WAIT_MS 500
9321 #define HCLGE_FLR_RETRY_CNT     5
9322
9323         struct hclge_dev *hdev = ae_dev->priv;
9324         int retry_cnt = 0;
9325         int ret;
9326
9327 retry:
9328         down(&hdev->reset_sem);
9329         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9330         hdev->reset_type = HNAE3_FLR_RESET;
9331         ret = hclge_reset_prepare(hdev);
9332         if (ret) {
9333                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9334                         ret);
9335                 if (hdev->reset_pending ||
9336                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9337                         dev_err(&hdev->pdev->dev,
9338                                 "reset_pending:0x%lx, retry_cnt:%d\n",
9339                                 hdev->reset_pending, retry_cnt);
9340                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9341                         up(&hdev->reset_sem);
9342                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
9343                         goto retry;
9344                 }
9345         }
9346
9347         /* disable misc vector before FLR done */
9348         hclge_enable_vector(&hdev->misc_vector, false);
9349         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9350         hdev->rst_stats.flr_rst_cnt++;
9351 }
9352
9353 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9354 {
9355         struct hclge_dev *hdev = ae_dev->priv;
9356         int ret;
9357
9358         hclge_enable_vector(&hdev->misc_vector, true);
9359
9360         ret = hclge_reset_rebuild(hdev);
9361         if (ret)
9362                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9363
9364         hdev->reset_type = HNAE3_NONE_RESET;
9365         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9366         up(&hdev->reset_sem);
9367 }
9368
9369 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9370 {
9371         u16 i;
9372
9373         for (i = 0; i < hdev->num_alloc_vport; i++) {
9374                 struct hclge_vport *vport = &hdev->vport[i];
9375                 int ret;
9376
9377                  /* Send cmd to clear VF's FUNC_RST_ING */
9378                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9379                 if (ret)
9380                         dev_warn(&hdev->pdev->dev,
9381                                  "clear vf(%u) rst failed %d!\n",
9382                                  vport->vport_id, ret);
9383         }
9384 }
9385
9386 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9387 {
9388         struct pci_dev *pdev = ae_dev->pdev;
9389         struct hclge_dev *hdev;
9390         int ret;
9391
9392         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9393         if (!hdev) {
9394                 ret = -ENOMEM;
9395                 goto out;
9396         }
9397
9398         hdev->pdev = pdev;
9399         hdev->ae_dev = ae_dev;
9400         hdev->reset_type = HNAE3_NONE_RESET;
9401         hdev->reset_level = HNAE3_FUNC_RESET;
9402         ae_dev->priv = hdev;
9403
9404         /* HW supprt 2 layer vlan */
9405         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9406
9407         mutex_init(&hdev->vport_lock);
9408         spin_lock_init(&hdev->fd_rule_lock);
9409         sema_init(&hdev->reset_sem, 1);
9410
9411         ret = hclge_pci_init(hdev);
9412         if (ret) {
9413                 dev_err(&pdev->dev, "PCI init failed\n");
9414                 goto out;
9415         }
9416
9417         /* Firmware command queue initialize */
9418         ret = hclge_cmd_queue_init(hdev);
9419         if (ret) {
9420                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9421                 goto err_pci_uninit;
9422         }
9423
9424         /* Firmware command initialize */
9425         ret = hclge_cmd_init(hdev);
9426         if (ret)
9427                 goto err_cmd_uninit;
9428
9429         ret = hclge_get_cap(hdev);
9430         if (ret) {
9431                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9432                         ret);
9433                 goto err_cmd_uninit;
9434         }
9435
9436         ret = hclge_configure(hdev);
9437         if (ret) {
9438                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9439                 goto err_cmd_uninit;
9440         }
9441
9442         ret = hclge_init_msi(hdev);
9443         if (ret) {
9444                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9445                 goto err_cmd_uninit;
9446         }
9447
9448         ret = hclge_misc_irq_init(hdev);
9449         if (ret) {
9450                 dev_err(&pdev->dev,
9451                         "Misc IRQ(vector0) init error, ret = %d.\n",
9452                         ret);
9453                 goto err_msi_uninit;
9454         }
9455
9456         ret = hclge_alloc_tqps(hdev);
9457         if (ret) {
9458                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9459                 goto err_msi_irq_uninit;
9460         }
9461
9462         ret = hclge_alloc_vport(hdev);
9463         if (ret) {
9464                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9465                 goto err_msi_irq_uninit;
9466         }
9467
9468         ret = hclge_map_tqp(hdev);
9469         if (ret) {
9470                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9471                 goto err_msi_irq_uninit;
9472         }
9473
9474         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9475                 ret = hclge_mac_mdio_config(hdev);
9476                 if (ret) {
9477                         dev_err(&hdev->pdev->dev,
9478                                 "mdio config fail ret=%d\n", ret);
9479                         goto err_msi_irq_uninit;
9480                 }
9481         }
9482
9483         ret = hclge_init_umv_space(hdev);
9484         if (ret) {
9485                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9486                 goto err_mdiobus_unreg;
9487         }
9488
9489         ret = hclge_mac_init(hdev);
9490         if (ret) {
9491                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9492                 goto err_mdiobus_unreg;
9493         }
9494
9495         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9496         if (ret) {
9497                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9498                 goto err_mdiobus_unreg;
9499         }
9500
9501         ret = hclge_config_gro(hdev, true);
9502         if (ret)
9503                 goto err_mdiobus_unreg;
9504
9505         ret = hclge_init_vlan_config(hdev);
9506         if (ret) {
9507                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9508                 goto err_mdiobus_unreg;
9509         }
9510
9511         ret = hclge_tm_schd_init(hdev);
9512         if (ret) {
9513                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9514                 goto err_mdiobus_unreg;
9515         }
9516
9517         hclge_rss_init_cfg(hdev);
9518         ret = hclge_rss_init_hw(hdev);
9519         if (ret) {
9520                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9521                 goto err_mdiobus_unreg;
9522         }
9523
9524         ret = init_mgr_tbl(hdev);
9525         if (ret) {
9526                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9527                 goto err_mdiobus_unreg;
9528         }
9529
9530         ret = hclge_init_fd_config(hdev);
9531         if (ret) {
9532                 dev_err(&pdev->dev,
9533                         "fd table init fail, ret=%d\n", ret);
9534                 goto err_mdiobus_unreg;
9535         }
9536
9537         INIT_KFIFO(hdev->mac_tnl_log);
9538
9539         hclge_dcb_ops_set(hdev);
9540
9541         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9542         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9543
9544         /* Setup affinity after service timer setup because add_timer_on
9545          * is called in affinity notify.
9546          */
9547         hclge_misc_affinity_setup(hdev);
9548
9549         hclge_clear_all_event_cause(hdev);
9550         hclge_clear_resetting_state(hdev);
9551
9552         /* Log and clear the hw errors those already occurred */
9553         hclge_handle_all_hns_hw_errors(ae_dev);
9554
9555         /* request delayed reset for the error recovery because an immediate
9556          * global reset on a PF affecting pending initialization of other PFs
9557          */
9558         if (ae_dev->hw_err_reset_req) {
9559                 enum hnae3_reset_type reset_level;
9560
9561                 reset_level = hclge_get_reset_level(ae_dev,
9562                                                     &ae_dev->hw_err_reset_req);
9563                 hclge_set_def_reset_request(ae_dev, reset_level);
9564                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9565         }
9566
9567         /* Enable MISC vector(vector0) */
9568         hclge_enable_vector(&hdev->misc_vector, true);
9569
9570         hclge_state_init(hdev);
9571         hdev->last_reset_time = jiffies;
9572
9573         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9574                  HCLGE_DRIVER_NAME);
9575
9576         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9577
9578         return 0;
9579
9580 err_mdiobus_unreg:
9581         if (hdev->hw.mac.phydev)
9582                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9583 err_msi_irq_uninit:
9584         hclge_misc_irq_uninit(hdev);
9585 err_msi_uninit:
9586         pci_free_irq_vectors(pdev);
9587 err_cmd_uninit:
9588         hclge_cmd_uninit(hdev);
9589 err_pci_uninit:
9590         pcim_iounmap(pdev, hdev->hw.io_base);
9591         pci_clear_master(pdev);
9592         pci_release_regions(pdev);
9593         pci_disable_device(pdev);
9594 out:
9595         return ret;
9596 }
9597
9598 static void hclge_stats_clear(struct hclge_dev *hdev)
9599 {
9600         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9601 }
9602
9603 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9604 {
9605         return hclge_config_switch_param(hdev, vf, enable,
9606                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9607 }
9608
9609 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9610 {
9611         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9612                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9613                                           enable, vf);
9614 }
9615
9616 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9617 {
9618         int ret;
9619
9620         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9621         if (ret) {
9622                 dev_err(&hdev->pdev->dev,
9623                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9624                         vf, enable ? "on" : "off", ret);
9625                 return ret;
9626         }
9627
9628         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9629         if (ret)
9630                 dev_err(&hdev->pdev->dev,
9631                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9632                         vf, enable ? "on" : "off", ret);
9633
9634         return ret;
9635 }
9636
9637 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9638                                  bool enable)
9639 {
9640         struct hclge_vport *vport = hclge_get_vport(handle);
9641         struct hclge_dev *hdev = vport->back;
9642         u32 new_spoofchk = enable ? 1 : 0;
9643         int ret;
9644
9645         if (hdev->pdev->revision == 0x20)
9646                 return -EOPNOTSUPP;
9647
9648         vport = hclge_get_vf_vport(hdev, vf);
9649         if (!vport)
9650                 return -EINVAL;
9651
9652         if (vport->vf_info.spoofchk == new_spoofchk)
9653                 return 0;
9654
9655         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9656                 dev_warn(&hdev->pdev->dev,
9657                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9658                          vf);
9659         else if (enable && hclge_is_umv_space_full(vport))
9660                 dev_warn(&hdev->pdev->dev,
9661                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9662                          vf);
9663
9664         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9665         if (ret)
9666                 return ret;
9667
9668         vport->vf_info.spoofchk = new_spoofchk;
9669         return 0;
9670 }
9671
9672 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9673 {
9674         struct hclge_vport *vport = hdev->vport;
9675         int ret;
9676         int i;
9677
9678         if (hdev->pdev->revision == 0x20)
9679                 return 0;
9680
9681         /* resume the vf spoof check state after reset */
9682         for (i = 0; i < hdev->num_alloc_vport; i++) {
9683                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9684                                                vport->vf_info.spoofchk);
9685                 if (ret)
9686                         return ret;
9687
9688                 vport++;
9689         }
9690
9691         return 0;
9692 }
9693
9694 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9695 {
9696         struct hclge_vport *vport = hclge_get_vport(handle);
9697         struct hclge_dev *hdev = vport->back;
9698         u32 new_trusted = enable ? 1 : 0;
9699         bool en_bc_pmc;
9700         int ret;
9701
9702         vport = hclge_get_vf_vport(hdev, vf);
9703         if (!vport)
9704                 return -EINVAL;
9705
9706         if (vport->vf_info.trusted == new_trusted)
9707                 return 0;
9708
9709         /* Disable promisc mode for VF if it is not trusted any more. */
9710         if (!enable && vport->vf_info.promisc_enable) {
9711                 en_bc_pmc = hdev->pdev->revision != 0x20;
9712                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9713                                                    en_bc_pmc);
9714                 if (ret)
9715                         return ret;
9716                 vport->vf_info.promisc_enable = 0;
9717                 hclge_inform_vf_promisc_info(vport);
9718         }
9719
9720         vport->vf_info.trusted = new_trusted;
9721
9722         return 0;
9723 }
9724
9725 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9726 {
9727         int ret;
9728         int vf;
9729
9730         /* reset vf rate to default value */
9731         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9732                 struct hclge_vport *vport = &hdev->vport[vf];
9733
9734                 vport->vf_info.max_tx_rate = 0;
9735                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9736                 if (ret)
9737                         dev_err(&hdev->pdev->dev,
9738                                 "vf%d failed to reset to default, ret=%d\n",
9739                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9740         }
9741 }
9742
9743 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9744                                      int min_tx_rate, int max_tx_rate)
9745 {
9746         if (min_tx_rate != 0 ||
9747             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9748                 dev_err(&hdev->pdev->dev,
9749                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9750                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9751                 return -EINVAL;
9752         }
9753
9754         return 0;
9755 }
9756
9757 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9758                              int min_tx_rate, int max_tx_rate, bool force)
9759 {
9760         struct hclge_vport *vport = hclge_get_vport(handle);
9761         struct hclge_dev *hdev = vport->back;
9762         int ret;
9763
9764         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9765         if (ret)
9766                 return ret;
9767
9768         vport = hclge_get_vf_vport(hdev, vf);
9769         if (!vport)
9770                 return -EINVAL;
9771
9772         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9773                 return 0;
9774
9775         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9776         if (ret)
9777                 return ret;
9778
9779         vport->vf_info.max_tx_rate = max_tx_rate;
9780
9781         return 0;
9782 }
9783
9784 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9785 {
9786         struct hnae3_handle *handle = &hdev->vport->nic;
9787         struct hclge_vport *vport;
9788         int ret;
9789         int vf;
9790
9791         /* resume the vf max_tx_rate after reset */
9792         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9793                 vport = hclge_get_vf_vport(hdev, vf);
9794                 if (!vport)
9795                         return -EINVAL;
9796
9797                 /* zero means max rate, after reset, firmware already set it to
9798                  * max rate, so just continue.
9799                  */
9800                 if (!vport->vf_info.max_tx_rate)
9801                         continue;
9802
9803                 ret = hclge_set_vf_rate(handle, vf, 0,
9804                                         vport->vf_info.max_tx_rate, true);
9805                 if (ret) {
9806                         dev_err(&hdev->pdev->dev,
9807                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9808                                 vf, vport->vf_info.max_tx_rate, ret);
9809                         return ret;
9810                 }
9811         }
9812
9813         return 0;
9814 }
9815
9816 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9817 {
9818         struct hclge_vport *vport = hdev->vport;
9819         int i;
9820
9821         for (i = 0; i < hdev->num_alloc_vport; i++) {
9822                 hclge_vport_stop(vport);
9823                 vport++;
9824         }
9825 }
9826
9827 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9828 {
9829         struct hclge_dev *hdev = ae_dev->priv;
9830         struct pci_dev *pdev = ae_dev->pdev;
9831         int ret;
9832
9833         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9834
9835         hclge_stats_clear(hdev);
9836         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9837         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9838
9839         ret = hclge_cmd_init(hdev);
9840         if (ret) {
9841                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9842                 return ret;
9843         }
9844
9845         ret = hclge_map_tqp(hdev);
9846         if (ret) {
9847                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9848                 return ret;
9849         }
9850
9851         hclge_reset_umv_space(hdev);
9852
9853         ret = hclge_mac_init(hdev);
9854         if (ret) {
9855                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9856                 return ret;
9857         }
9858
9859         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9860         if (ret) {
9861                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9862                 return ret;
9863         }
9864
9865         ret = hclge_config_gro(hdev, true);
9866         if (ret)
9867                 return ret;
9868
9869         ret = hclge_init_vlan_config(hdev);
9870         if (ret) {
9871                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9872                 return ret;
9873         }
9874
9875         ret = hclge_tm_init_hw(hdev, true);
9876         if (ret) {
9877                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9878                 return ret;
9879         }
9880
9881         ret = hclge_rss_init_hw(hdev);
9882         if (ret) {
9883                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9884                 return ret;
9885         }
9886
9887         ret = hclge_init_fd_config(hdev);
9888         if (ret) {
9889                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9890                 return ret;
9891         }
9892
9893         /* Log and clear the hw errors those already occurred */
9894         hclge_handle_all_hns_hw_errors(ae_dev);
9895
9896         /* Re-enable the hw error interrupts because
9897          * the interrupts get disabled on global reset.
9898          */
9899         ret = hclge_config_nic_hw_error(hdev, true);
9900         if (ret) {
9901                 dev_err(&pdev->dev,
9902                         "fail(%d) to re-enable NIC hw error interrupts\n",
9903                         ret);
9904                 return ret;
9905         }
9906
9907         if (hdev->roce_client) {
9908                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9909                 if (ret) {
9910                         dev_err(&pdev->dev,
9911                                 "fail(%d) to re-enable roce ras interrupts\n",
9912                                 ret);
9913                         return ret;
9914                 }
9915         }
9916
9917         hclge_reset_vport_state(hdev);
9918         ret = hclge_reset_vport_spoofchk(hdev);
9919         if (ret)
9920                 return ret;
9921
9922         ret = hclge_resume_vf_rate(hdev);
9923         if (ret)
9924                 return ret;
9925
9926         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9927                  HCLGE_DRIVER_NAME);
9928
9929         return 0;
9930 }
9931
9932 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9933 {
9934         struct hclge_dev *hdev = ae_dev->priv;
9935         struct hclge_mac *mac = &hdev->hw.mac;
9936
9937         hclge_reset_vf_rate(hdev);
9938         hclge_misc_affinity_teardown(hdev);
9939         hclge_state_uninit(hdev);
9940
9941         if (mac->phydev)
9942                 mdiobus_unregister(mac->mdio_bus);
9943
9944         hclge_uninit_umv_space(hdev);
9945
9946         /* Disable MISC vector(vector0) */
9947         hclge_enable_vector(&hdev->misc_vector, false);
9948         synchronize_irq(hdev->misc_vector.vector_irq);
9949
9950         /* Disable all hw interrupts */
9951         hclge_config_mac_tnl_int(hdev, false);
9952         hclge_config_nic_hw_error(hdev, false);
9953         hclge_config_rocee_ras_interrupt(hdev, false);
9954
9955         hclge_cmd_uninit(hdev);
9956         hclge_misc_irq_uninit(hdev);
9957         hclge_pci_uninit(hdev);
9958         mutex_destroy(&hdev->vport_lock);
9959         hclge_uninit_vport_mac_table(hdev);
9960         hclge_uninit_vport_vlan_table(hdev);
9961         ae_dev->priv = NULL;
9962 }
9963
9964 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9965 {
9966         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9967         struct hclge_vport *vport = hclge_get_vport(handle);
9968         struct hclge_dev *hdev = vport->back;
9969
9970         return min_t(u32, hdev->rss_size_max,
9971                      vport->alloc_tqps / kinfo->num_tc);
9972 }
9973
9974 static void hclge_get_channels(struct hnae3_handle *handle,
9975                                struct ethtool_channels *ch)
9976 {
9977         ch->max_combined = hclge_get_max_channels(handle);
9978         ch->other_count = 1;
9979         ch->max_other = 1;
9980         ch->combined_count = handle->kinfo.rss_size;
9981 }
9982
9983 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9984                                         u16 *alloc_tqps, u16 *max_rss_size)
9985 {
9986         struct hclge_vport *vport = hclge_get_vport(handle);
9987         struct hclge_dev *hdev = vport->back;
9988
9989         *alloc_tqps = vport->alloc_tqps;
9990         *max_rss_size = hdev->rss_size_max;
9991 }
9992
9993 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9994                               bool rxfh_configured)
9995 {
9996         struct hclge_vport *vport = hclge_get_vport(handle);
9997         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9998         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9999         struct hclge_dev *hdev = vport->back;
10000         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10001         u16 cur_rss_size = kinfo->rss_size;
10002         u16 cur_tqps = kinfo->num_tqps;
10003         u16 tc_valid[HCLGE_MAX_TC_NUM];
10004         u16 roundup_size;
10005         u32 *rss_indir;
10006         unsigned int i;
10007         int ret;
10008
10009         kinfo->req_rss_size = new_tqps_num;
10010
10011         ret = hclge_tm_vport_map_update(hdev);
10012         if (ret) {
10013                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10014                 return ret;
10015         }
10016
10017         roundup_size = roundup_pow_of_two(kinfo->rss_size);
10018         roundup_size = ilog2(roundup_size);
10019         /* Set the RSS TC mode according to the new RSS size */
10020         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10021                 tc_valid[i] = 0;
10022
10023                 if (!(hdev->hw_tc_map & BIT(i)))
10024                         continue;
10025
10026                 tc_valid[i] = 1;
10027                 tc_size[i] = roundup_size;
10028                 tc_offset[i] = kinfo->rss_size * i;
10029         }
10030         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10031         if (ret)
10032                 return ret;
10033
10034         /* RSS indirection table has been configuared by user */
10035         if (rxfh_configured)
10036                 goto out;
10037
10038         /* Reinitializes the rss indirect table according to the new RSS size */
10039         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10040         if (!rss_indir)
10041                 return -ENOMEM;
10042
10043         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10044                 rss_indir[i] = i % kinfo->rss_size;
10045
10046         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10047         if (ret)
10048                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10049                         ret);
10050
10051         kfree(rss_indir);
10052
10053 out:
10054         if (!ret)
10055                 dev_info(&hdev->pdev->dev,
10056                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10057                          cur_rss_size, kinfo->rss_size,
10058                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10059
10060         return ret;
10061 }
10062
10063 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10064                               u32 *regs_num_64_bit)
10065 {
10066         struct hclge_desc desc;
10067         u32 total_num;
10068         int ret;
10069
10070         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10071         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10072         if (ret) {
10073                 dev_err(&hdev->pdev->dev,
10074                         "Query register number cmd failed, ret = %d.\n", ret);
10075                 return ret;
10076         }
10077
10078         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10079         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10080
10081         total_num = *regs_num_32_bit + *regs_num_64_bit;
10082         if (!total_num)
10083                 return -EINVAL;
10084
10085         return 0;
10086 }
10087
10088 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10089                                  void *data)
10090 {
10091 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10092 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10093
10094         struct hclge_desc *desc;
10095         u32 *reg_val = data;
10096         __le32 *desc_data;
10097         int nodata_num;
10098         int cmd_num;
10099         int i, k, n;
10100         int ret;
10101
10102         if (regs_num == 0)
10103                 return 0;
10104
10105         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10106         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10107                                HCLGE_32_BIT_REG_RTN_DATANUM);
10108         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10109         if (!desc)
10110                 return -ENOMEM;
10111
10112         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10113         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10114         if (ret) {
10115                 dev_err(&hdev->pdev->dev,
10116                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10117                 kfree(desc);
10118                 return ret;
10119         }
10120
10121         for (i = 0; i < cmd_num; i++) {
10122                 if (i == 0) {
10123                         desc_data = (__le32 *)(&desc[i].data[0]);
10124                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10125                 } else {
10126                         desc_data = (__le32 *)(&desc[i]);
10127                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10128                 }
10129                 for (k = 0; k < n; k++) {
10130                         *reg_val++ = le32_to_cpu(*desc_data++);
10131
10132                         regs_num--;
10133                         if (!regs_num)
10134                                 break;
10135                 }
10136         }
10137
10138         kfree(desc);
10139         return 0;
10140 }
10141
10142 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10143                                  void *data)
10144 {
10145 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10146 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10147
10148         struct hclge_desc *desc;
10149         u64 *reg_val = data;
10150         __le64 *desc_data;
10151         int nodata_len;
10152         int cmd_num;
10153         int i, k, n;
10154         int ret;
10155
10156         if (regs_num == 0)
10157                 return 0;
10158
10159         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10160         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10161                                HCLGE_64_BIT_REG_RTN_DATANUM);
10162         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10163         if (!desc)
10164                 return -ENOMEM;
10165
10166         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10167         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10168         if (ret) {
10169                 dev_err(&hdev->pdev->dev,
10170                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10171                 kfree(desc);
10172                 return ret;
10173         }
10174
10175         for (i = 0; i < cmd_num; i++) {
10176                 if (i == 0) {
10177                         desc_data = (__le64 *)(&desc[i].data[0]);
10178                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10179                 } else {
10180                         desc_data = (__le64 *)(&desc[i]);
10181                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10182                 }
10183                 for (k = 0; k < n; k++) {
10184                         *reg_val++ = le64_to_cpu(*desc_data++);
10185
10186                         regs_num--;
10187                         if (!regs_num)
10188                                 break;
10189                 }
10190         }
10191
10192         kfree(desc);
10193         return 0;
10194 }
10195
10196 #define MAX_SEPARATE_NUM        4
10197 #define SEPARATOR_VALUE         0xFDFCFBFA
10198 #define REG_NUM_PER_LINE        4
10199 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10200 #define REG_SEPARATOR_LINE      1
10201 #define REG_NUM_REMAIN_MASK     3
10202 #define BD_LIST_MAX_NUM         30
10203
10204 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10205 {
10206         /*prepare 4 commands to query DFX BD number*/
10207         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10208         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10209         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10210         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10211         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10212         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10213         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10214
10215         return hclge_cmd_send(&hdev->hw, desc, 4);
10216 }
10217
10218 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10219                                     int *bd_num_list,
10220                                     u32 type_num)
10221 {
10222 #define HCLGE_DFX_REG_BD_NUM    4
10223
10224         u32 entries_per_desc, desc_index, index, offset, i;
10225         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10226         int ret;
10227
10228         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10229         if (ret) {
10230                 dev_err(&hdev->pdev->dev,
10231                         "Get dfx bd num fail, status is %d.\n", ret);
10232                 return ret;
10233         }
10234
10235         entries_per_desc = ARRAY_SIZE(desc[0].data);
10236         for (i = 0; i < type_num; i++) {
10237                 offset = hclge_dfx_bd_offset_list[i];
10238                 index = offset % entries_per_desc;
10239                 desc_index = offset / entries_per_desc;
10240                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10241         }
10242
10243         return ret;
10244 }
10245
10246 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10247                                   struct hclge_desc *desc_src, int bd_num,
10248                                   enum hclge_opcode_type cmd)
10249 {
10250         struct hclge_desc *desc = desc_src;
10251         int i, ret;
10252
10253         hclge_cmd_setup_basic_desc(desc, cmd, true);
10254         for (i = 0; i < bd_num - 1; i++) {
10255                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10256                 desc++;
10257                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10258         }
10259
10260         desc = desc_src;
10261         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10262         if (ret)
10263                 dev_err(&hdev->pdev->dev,
10264                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10265                         cmd, ret);
10266
10267         return ret;
10268 }
10269
10270 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10271                                     void *data)
10272 {
10273         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10274         struct hclge_desc *desc = desc_src;
10275         u32 *reg = data;
10276
10277         entries_per_desc = ARRAY_SIZE(desc->data);
10278         reg_num = entries_per_desc * bd_num;
10279         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10280         for (i = 0; i < reg_num; i++) {
10281                 index = i % entries_per_desc;
10282                 desc_index = i / entries_per_desc;
10283                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10284         }
10285         for (i = 0; i < separator_num; i++)
10286                 *reg++ = SEPARATOR_VALUE;
10287
10288         return reg_num + separator_num;
10289 }
10290
10291 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10292 {
10293         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10294         int data_len_per_desc, data_len, bd_num, i;
10295         int bd_num_list[BD_LIST_MAX_NUM];
10296         int ret;
10297
10298         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10299         if (ret) {
10300                 dev_err(&hdev->pdev->dev,
10301                         "Get dfx reg bd num fail, status is %d.\n", ret);
10302                 return ret;
10303         }
10304
10305         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10306         *len = 0;
10307         for (i = 0; i < dfx_reg_type_num; i++) {
10308                 bd_num = bd_num_list[i];
10309                 data_len = data_len_per_desc * bd_num;
10310                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10311         }
10312
10313         return ret;
10314 }
10315
10316 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10317 {
10318         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10319         int bd_num, bd_num_max, buf_len, i;
10320         int bd_num_list[BD_LIST_MAX_NUM];
10321         struct hclge_desc *desc_src;
10322         u32 *reg = data;
10323         int ret;
10324
10325         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10326         if (ret) {
10327                 dev_err(&hdev->pdev->dev,
10328                         "Get dfx reg bd num fail, status is %d.\n", ret);
10329                 return ret;
10330         }
10331
10332         bd_num_max = bd_num_list[0];
10333         for (i = 1; i < dfx_reg_type_num; i++)
10334                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10335
10336         buf_len = sizeof(*desc_src) * bd_num_max;
10337         desc_src = kzalloc(buf_len, GFP_KERNEL);
10338         if (!desc_src) {
10339                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10340                 return -ENOMEM;
10341         }
10342
10343         for (i = 0; i < dfx_reg_type_num; i++) {
10344                 bd_num = bd_num_list[i];
10345                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10346                                              hclge_dfx_reg_opcode_list[i]);
10347                 if (ret) {
10348                         dev_err(&hdev->pdev->dev,
10349                                 "Get dfx reg fail, status is %d.\n", ret);
10350                         break;
10351                 }
10352
10353                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10354         }
10355
10356         kfree(desc_src);
10357         return ret;
10358 }
10359
10360 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10361                               struct hnae3_knic_private_info *kinfo)
10362 {
10363 #define HCLGE_RING_REG_OFFSET           0x200
10364 #define HCLGE_RING_INT_REG_OFFSET       0x4
10365
10366         int i, j, reg_num, separator_num;
10367         int data_num_sum;
10368         u32 *reg = data;
10369
10370         /* fetching per-PF registers valus from PF PCIe register space */
10371         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10372         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10373         for (i = 0; i < reg_num; i++)
10374                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10375         for (i = 0; i < separator_num; i++)
10376                 *reg++ = SEPARATOR_VALUE;
10377         data_num_sum = reg_num + separator_num;
10378
10379         reg_num = ARRAY_SIZE(common_reg_addr_list);
10380         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10381         for (i = 0; i < reg_num; i++)
10382                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10383         for (i = 0; i < separator_num; i++)
10384                 *reg++ = SEPARATOR_VALUE;
10385         data_num_sum += reg_num + separator_num;
10386
10387         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10388         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10389         for (j = 0; j < kinfo->num_tqps; j++) {
10390                 for (i = 0; i < reg_num; i++)
10391                         *reg++ = hclge_read_dev(&hdev->hw,
10392                                                 ring_reg_addr_list[i] +
10393                                                 HCLGE_RING_REG_OFFSET * j);
10394                 for (i = 0; i < separator_num; i++)
10395                         *reg++ = SEPARATOR_VALUE;
10396         }
10397         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10398
10399         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10400         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10401         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10402                 for (i = 0; i < reg_num; i++)
10403                         *reg++ = hclge_read_dev(&hdev->hw,
10404                                                 tqp_intr_reg_addr_list[i] +
10405                                                 HCLGE_RING_INT_REG_OFFSET * j);
10406                 for (i = 0; i < separator_num; i++)
10407                         *reg++ = SEPARATOR_VALUE;
10408         }
10409         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10410
10411         return data_num_sum;
10412 }
10413
10414 static int hclge_get_regs_len(struct hnae3_handle *handle)
10415 {
10416         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10417         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10418         struct hclge_vport *vport = hclge_get_vport(handle);
10419         struct hclge_dev *hdev = vport->back;
10420         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10421         int regs_lines_32_bit, regs_lines_64_bit;
10422         int ret;
10423
10424         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10425         if (ret) {
10426                 dev_err(&hdev->pdev->dev,
10427                         "Get register number failed, ret = %d.\n", ret);
10428                 return ret;
10429         }
10430
10431         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10432         if (ret) {
10433                 dev_err(&hdev->pdev->dev,
10434                         "Get dfx reg len failed, ret = %d.\n", ret);
10435                 return ret;
10436         }
10437
10438         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10439                 REG_SEPARATOR_LINE;
10440         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10441                 REG_SEPARATOR_LINE;
10442         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10443                 REG_SEPARATOR_LINE;
10444         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10445                 REG_SEPARATOR_LINE;
10446         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10447                 REG_SEPARATOR_LINE;
10448         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10449                 REG_SEPARATOR_LINE;
10450
10451         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10452                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10453                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10454 }
10455
10456 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10457                            void *data)
10458 {
10459         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10460         struct hclge_vport *vport = hclge_get_vport(handle);
10461         struct hclge_dev *hdev = vport->back;
10462         u32 regs_num_32_bit, regs_num_64_bit;
10463         int i, reg_num, separator_num, ret;
10464         u32 *reg = data;
10465
10466         *version = hdev->fw_version;
10467
10468         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10469         if (ret) {
10470                 dev_err(&hdev->pdev->dev,
10471                         "Get register number failed, ret = %d.\n", ret);
10472                 return;
10473         }
10474
10475         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10476
10477         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10478         if (ret) {
10479                 dev_err(&hdev->pdev->dev,
10480                         "Get 32 bit register failed, ret = %d.\n", ret);
10481                 return;
10482         }
10483         reg_num = regs_num_32_bit;
10484         reg += reg_num;
10485         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10486         for (i = 0; i < separator_num; i++)
10487                 *reg++ = SEPARATOR_VALUE;
10488
10489         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10490         if (ret) {
10491                 dev_err(&hdev->pdev->dev,
10492                         "Get 64 bit register failed, ret = %d.\n", ret);
10493                 return;
10494         }
10495         reg_num = regs_num_64_bit * 2;
10496         reg += reg_num;
10497         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10498         for (i = 0; i < separator_num; i++)
10499                 *reg++ = SEPARATOR_VALUE;
10500
10501         ret = hclge_get_dfx_reg(hdev, reg);
10502         if (ret)
10503                 dev_err(&hdev->pdev->dev,
10504                         "Get dfx register failed, ret = %d.\n", ret);
10505 }
10506
10507 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10508 {
10509         struct hclge_set_led_state_cmd *req;
10510         struct hclge_desc desc;
10511         int ret;
10512
10513         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10514
10515         req = (struct hclge_set_led_state_cmd *)desc.data;
10516         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10517                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10518
10519         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10520         if (ret)
10521                 dev_err(&hdev->pdev->dev,
10522                         "Send set led state cmd error, ret =%d\n", ret);
10523
10524         return ret;
10525 }
10526
10527 enum hclge_led_status {
10528         HCLGE_LED_OFF,
10529         HCLGE_LED_ON,
10530         HCLGE_LED_NO_CHANGE = 0xFF,
10531 };
10532
10533 static int hclge_set_led_id(struct hnae3_handle *handle,
10534                             enum ethtool_phys_id_state status)
10535 {
10536         struct hclge_vport *vport = hclge_get_vport(handle);
10537         struct hclge_dev *hdev = vport->back;
10538
10539         switch (status) {
10540         case ETHTOOL_ID_ACTIVE:
10541                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10542         case ETHTOOL_ID_INACTIVE:
10543                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10544         default:
10545                 return -EINVAL;
10546         }
10547 }
10548
10549 static void hclge_get_link_mode(struct hnae3_handle *handle,
10550                                 unsigned long *supported,
10551                                 unsigned long *advertising)
10552 {
10553         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10554         struct hclge_vport *vport = hclge_get_vport(handle);
10555         struct hclge_dev *hdev = vport->back;
10556         unsigned int idx = 0;
10557
10558         for (; idx < size; idx++) {
10559                 supported[idx] = hdev->hw.mac.supported[idx];
10560                 advertising[idx] = hdev->hw.mac.advertising[idx];
10561         }
10562 }
10563
10564 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10565 {
10566         struct hclge_vport *vport = hclge_get_vport(handle);
10567         struct hclge_dev *hdev = vport->back;
10568
10569         return hclge_config_gro(hdev, enable);
10570 }
10571
10572 static const struct hnae3_ae_ops hclge_ops = {
10573         .init_ae_dev = hclge_init_ae_dev,
10574         .uninit_ae_dev = hclge_uninit_ae_dev,
10575         .flr_prepare = hclge_flr_prepare,
10576         .flr_done = hclge_flr_done,
10577         .init_client_instance = hclge_init_client_instance,
10578         .uninit_client_instance = hclge_uninit_client_instance,
10579         .map_ring_to_vector = hclge_map_ring_to_vector,
10580         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10581         .get_vector = hclge_get_vector,
10582         .put_vector = hclge_put_vector,
10583         .set_promisc_mode = hclge_set_promisc_mode,
10584         .set_loopback = hclge_set_loopback,
10585         .start = hclge_ae_start,
10586         .stop = hclge_ae_stop,
10587         .client_start = hclge_client_start,
10588         .client_stop = hclge_client_stop,
10589         .get_status = hclge_get_status,
10590         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10591         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10592         .get_media_type = hclge_get_media_type,
10593         .check_port_speed = hclge_check_port_speed,
10594         .get_fec = hclge_get_fec,
10595         .set_fec = hclge_set_fec,
10596         .get_rss_key_size = hclge_get_rss_key_size,
10597         .get_rss_indir_size = hclge_get_rss_indir_size,
10598         .get_rss = hclge_get_rss,
10599         .set_rss = hclge_set_rss,
10600         .set_rss_tuple = hclge_set_rss_tuple,
10601         .get_rss_tuple = hclge_get_rss_tuple,
10602         .get_tc_size = hclge_get_tc_size,
10603         .get_mac_addr = hclge_get_mac_addr,
10604         .set_mac_addr = hclge_set_mac_addr,
10605         .do_ioctl = hclge_do_ioctl,
10606         .add_uc_addr = hclge_add_uc_addr,
10607         .rm_uc_addr = hclge_rm_uc_addr,
10608         .add_mc_addr = hclge_add_mc_addr,
10609         .rm_mc_addr = hclge_rm_mc_addr,
10610         .set_autoneg = hclge_set_autoneg,
10611         .get_autoneg = hclge_get_autoneg,
10612         .restart_autoneg = hclge_restart_autoneg,
10613         .halt_autoneg = hclge_halt_autoneg,
10614         .get_pauseparam = hclge_get_pauseparam,
10615         .set_pauseparam = hclge_set_pauseparam,
10616         .set_mtu = hclge_set_mtu,
10617         .reset_queue = hclge_reset_tqp,
10618         .get_stats = hclge_get_stats,
10619         .get_mac_stats = hclge_get_mac_stat,
10620         .update_stats = hclge_update_stats,
10621         .get_strings = hclge_get_strings,
10622         .get_sset_count = hclge_get_sset_count,
10623         .get_fw_version = hclge_get_fw_version,
10624         .get_mdix_mode = hclge_get_mdix_mode,
10625         .enable_vlan_filter = hclge_enable_vlan_filter,
10626         .set_vlan_filter = hclge_set_vlan_filter,
10627         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10628         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10629         .reset_event = hclge_reset_event,
10630         .get_reset_level = hclge_get_reset_level,
10631         .set_default_reset_request = hclge_set_def_reset_request,
10632         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10633         .set_channels = hclge_set_channels,
10634         .get_channels = hclge_get_channels,
10635         .get_regs_len = hclge_get_regs_len,
10636         .get_regs = hclge_get_regs,
10637         .set_led_id = hclge_set_led_id,
10638         .get_link_mode = hclge_get_link_mode,
10639         .add_fd_entry = hclge_add_fd_entry,
10640         .del_fd_entry = hclge_del_fd_entry,
10641         .del_all_fd_entries = hclge_del_all_fd_entries,
10642         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10643         .get_fd_rule_info = hclge_get_fd_rule_info,
10644         .get_fd_all_rules = hclge_get_all_rules,
10645         .restore_fd_rules = hclge_restore_fd_entries,
10646         .enable_fd = hclge_enable_fd,
10647         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10648         .dbg_run_cmd = hclge_dbg_run_cmd,
10649         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10650         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10651         .ae_dev_resetting = hclge_ae_dev_resetting,
10652         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10653         .set_gro_en = hclge_gro_en,
10654         .get_global_queue_id = hclge_covert_handle_qid_global,
10655         .set_timer_task = hclge_set_timer_task,
10656         .mac_connect_phy = hclge_mac_connect_phy,
10657         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10658         .restore_vlan_table = hclge_restore_vlan_table,
10659         .get_vf_config = hclge_get_vf_config,
10660         .set_vf_link_state = hclge_set_vf_link_state,
10661         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10662         .set_vf_trust = hclge_set_vf_trust,
10663         .set_vf_rate = hclge_set_vf_rate,
10664         .set_vf_mac = hclge_set_vf_mac,
10665 };
10666
10667 static struct hnae3_ae_algo ae_algo = {
10668         .ops = &hclge_ops,
10669         .pdev_id_table = ae_algo_pci_tbl,
10670 };
10671
10672 static int hclge_init(void)
10673 {
10674         pr_info("%s is initializing\n", HCLGE_NAME);
10675
10676         hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10677         if (!hclge_wq) {
10678                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10679                 return -ENOMEM;
10680         }
10681
10682         hnae3_register_ae_algo(&ae_algo);
10683
10684         return 0;
10685 }
10686
10687 static void hclge_exit(void)
10688 {
10689         hnae3_unregister_ae_algo(&ae_algo);
10690         destroy_workqueue(hclge_wq);
10691 }
10692 module_init(hclge_init);
10693 module_exit(hclge_exit);
10694
10695 MODULE_LICENSE("GPL");
10696 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10697 MODULE_DESCRIPTION("HCLGE Driver");
10698 MODULE_VERSION(HCLGE_MOD_VERSION);