drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
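/* Illustrative note: these two helpers work as a pair. g_mac_stats_string
 * below records HCLGE_MAC_STATS_FIELD_OFF(field) for every counter, and
 * hclge_comm_get_stats() later uses HCLGE_STATS_READ(&hdev->mac_stats,
 * offset) to fetch that u64 counter for ethtool.
 */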
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
75 static struct workqueue_struct *hclge_wq;
76
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85         /* required last entry */
86         {0, }
87 };
88
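/* Publishing the PCI ID table below allows userspace module loading to
 * match these device IDs automatically (standard MODULE_DEVICE_TABLE
 * behaviour).
 */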
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92                                          HCLGE_CMDQ_TX_ADDR_H_REG,
93                                          HCLGE_CMDQ_TX_DEPTH_REG,
94                                          HCLGE_CMDQ_TX_TAIL_REG,
95                                          HCLGE_CMDQ_TX_HEAD_REG,
96                                          HCLGE_CMDQ_RX_ADDR_L_REG,
97                                          HCLGE_CMDQ_RX_ADDR_H_REG,
98                                          HCLGE_CMDQ_RX_DEPTH_REG,
99                                          HCLGE_CMDQ_RX_TAIL_REG,
100                                          HCLGE_CMDQ_RX_HEAD_REG,
101                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
102                                          HCLGE_CMDQ_INTR_STS_REG,
103                                          HCLGE_CMDQ_INTR_EN_REG,
104                                          HCLGE_CMDQ_INTR_GEN_REG};
105
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107                                            HCLGE_VECTOR0_OTER_EN_REG,
108                                            HCLGE_MISC_RESET_STS_REG,
109                                            HCLGE_MISC_VECTOR_INT_STS,
110                                            HCLGE_GLOBAL_RESET_REG,
111                                            HCLGE_FUN_RST_ING,
112                                            HCLGE_GRO_EN_REG};
113
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115                                          HCLGE_RING_RX_ADDR_H_REG,
116                                          HCLGE_RING_RX_BD_NUM_REG,
117                                          HCLGE_RING_RX_BD_LENGTH_REG,
118                                          HCLGE_RING_RX_MERGE_EN_REG,
119                                          HCLGE_RING_RX_TAIL_REG,
120                                          HCLGE_RING_RX_HEAD_REG,
121                                          HCLGE_RING_RX_FBD_NUM_REG,
122                                          HCLGE_RING_RX_OFFSET_REG,
123                                          HCLGE_RING_RX_FBD_OFFSET_REG,
124                                          HCLGE_RING_RX_STASH_REG,
125                                          HCLGE_RING_RX_BD_ERR_REG,
126                                          HCLGE_RING_TX_ADDR_L_REG,
127                                          HCLGE_RING_TX_ADDR_H_REG,
128                                          HCLGE_RING_TX_BD_NUM_REG,
129                                          HCLGE_RING_TX_PRIORITY_REG,
130                                          HCLGE_RING_TX_TC_REG,
131                                          HCLGE_RING_TX_MERGE_EN_REG,
132                                          HCLGE_RING_TX_TAIL_REG,
133                                          HCLGE_RING_TX_HEAD_REG,
134                                          HCLGE_RING_TX_FBD_NUM_REG,
135                                          HCLGE_RING_TX_OFFSET_REG,
136                                          HCLGE_RING_TX_EBD_NUM_REG,
137                                          HCLGE_RING_TX_EBD_OFFSET_REG,
138                                          HCLGE_RING_TX_BD_ERR_REG,
139                                          HCLGE_RING_EN_REG};
140
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142                                              HCLGE_TQP_INTR_GL0_REG,
143                                              HCLGE_TQP_INTR_GL1_REG,
144                                              HCLGE_TQP_INTR_GL2_REG,
145                                              HCLGE_TQP_INTR_RL_REG};
146
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148         "App    Loopback test",
149         "Serdes serial Loopback test",
150         "Serdes parallel Loopback test",
151         "Phy    Loopback test"
152 };
153
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155         {"mac_tx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157         {"mac_rx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159         {"mac_tx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161         {"mac_rx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163         {"mac_tx_pfc_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165         {"mac_tx_pfc_pri0_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167         {"mac_tx_pfc_pri1_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169         {"mac_tx_pfc_pri2_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171         {"mac_tx_pfc_pri3_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173         {"mac_tx_pfc_pri4_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175         {"mac_tx_pfc_pri5_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177         {"mac_tx_pfc_pri6_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179         {"mac_tx_pfc_pri7_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181         {"mac_rx_pfc_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183         {"mac_rx_pfc_pri0_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185         {"mac_rx_pfc_pri1_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187         {"mac_rx_pfc_pri2_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189         {"mac_rx_pfc_pri3_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191         {"mac_rx_pfc_pri4_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193         {"mac_rx_pfc_pri5_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195         {"mac_rx_pfc_pri6_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197         {"mac_rx_pfc_pri7_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199         {"mac_tx_total_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201         {"mac_tx_total_oct_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203         {"mac_tx_good_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205         {"mac_tx_bad_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207         {"mac_tx_good_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209         {"mac_tx_bad_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211         {"mac_tx_uni_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213         {"mac_tx_multi_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215         {"mac_tx_broad_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217         {"mac_tx_undersize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219         {"mac_tx_oversize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221         {"mac_tx_64_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223         {"mac_tx_65_127_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225         {"mac_tx_128_255_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227         {"mac_tx_256_511_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229         {"mac_tx_512_1023_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231         {"mac_tx_1024_1518_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233         {"mac_tx_1519_2047_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235         {"mac_tx_2048_4095_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237         {"mac_tx_4096_8191_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239         {"mac_tx_8192_9216_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241         {"mac_tx_9217_12287_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243         {"mac_tx_12288_16383_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245         {"mac_tx_1519_max_good_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247         {"mac_tx_1519_max_bad_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249         {"mac_rx_total_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251         {"mac_rx_total_oct_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253         {"mac_rx_good_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255         {"mac_rx_bad_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257         {"mac_rx_good_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259         {"mac_rx_bad_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261         {"mac_rx_uni_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263         {"mac_rx_multi_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265         {"mac_rx_broad_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267         {"mac_rx_undersize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269         {"mac_rx_oversize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271         {"mac_rx_64_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273         {"mac_rx_65_127_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275         {"mac_rx_128_255_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277         {"mac_rx_256_511_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279         {"mac_rx_512_1023_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281         {"mac_rx_1024_1518_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283         {"mac_rx_1519_2047_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285         {"mac_rx_2048_4095_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287         {"mac_rx_4096_8191_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289         {"mac_rx_8192_9216_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291         {"mac_rx_9217_12287_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293         {"mac_rx_12288_16383_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295         {"mac_rx_1519_max_good_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297         {"mac_rx_1519_max_bad_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300         {"mac_tx_fragment_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302         {"mac_tx_undermin_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304         {"mac_tx_jabber_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306         {"mac_tx_err_all_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308         {"mac_tx_from_app_good_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310         {"mac_tx_from_app_bad_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312         {"mac_rx_fragment_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314         {"mac_rx_undermin_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316         {"mac_rx_jabber_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318         {"mac_rx_fcs_err_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320         {"mac_rx_send_app_good_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322         {"mac_rx_send_app_bad_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327         {
328                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331                 .i_port_bitmap = 0x1,
332         },
333 };
334
335 static const u8 hclge_hash_key[] = {
336         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342
343 static const u32 hclge_dfx_bd_offset_list[] = {
344         HCLGE_DFX_BIOS_BD_OFFSET,
345         HCLGE_DFX_SSU_0_BD_OFFSET,
346         HCLGE_DFX_SSU_1_BD_OFFSET,
347         HCLGE_DFX_IGU_BD_OFFSET,
348         HCLGE_DFX_RPU_0_BD_OFFSET,
349         HCLGE_DFX_RPU_1_BD_OFFSET,
350         HCLGE_DFX_NCSI_BD_OFFSET,
351         HCLGE_DFX_RTC_BD_OFFSET,
352         HCLGE_DFX_PPP_BD_OFFSET,
353         HCLGE_DFX_RCB_BD_OFFSET,
354         HCLGE_DFX_TQP_BD_OFFSET,
355         HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359         HCLGE_OPC_DFX_BIOS_COMMON_REG,
360         HCLGE_OPC_DFX_SSU_REG_0,
361         HCLGE_OPC_DFX_SSU_REG_1,
362         HCLGE_OPC_DFX_IGU_EGU_REG,
363         HCLGE_OPC_DFX_RPU_REG_0,
364         HCLGE_OPC_DFX_RPU_REG_1,
365         HCLGE_OPC_DFX_NCSI_REG,
366         HCLGE_OPC_DFX_RTC_REG,
367         HCLGE_OPC_DFX_PPP_REG,
368         HCLGE_OPC_DFX_RCB_REG,
369         HCLGE_OPC_DFX_TQP_REG,
370         HCLGE_OPC_DFX_SSU_REG_2
371 };
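/* Note: hclge_dfx_reg_opcode_list is kept index-aligned with
 * hclge_dfx_bd_offset_list above, so entry i of one corresponds to
 * entry i of the other.
 */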
372
373 static const struct key_info meta_data_key_info[] = {
374         { PACKET_TYPE_ID, 6},
375         { IP_FRAGEMENT, 1},
376         { ROCE_TYPE, 1},
377         { NEXT_KEY, 5},
378         { VLAN_NUMBER, 2},
379         { SRC_VPORT, 12},
380         { DST_VPORT, 12},
381         { TUNNEL_PACKET, 1},
382 };
383
384 static const struct key_info tuple_key_info[] = {
385         { OUTER_DST_MAC, 48},
386         { OUTER_SRC_MAC, 48},
387         { OUTER_VLAN_TAG_FST, 16},
388         { OUTER_VLAN_TAG_SEC, 16},
389         { OUTER_ETH_TYPE, 16},
390         { OUTER_L2_RSV, 16},
391         { OUTER_IP_TOS, 8},
392         { OUTER_IP_PROTO, 8},
393         { OUTER_SRC_IP, 32},
394         { OUTER_DST_IP, 32},
395         { OUTER_L3_RSV, 16},
396         { OUTER_SRC_PORT, 16},
397         { OUTER_DST_PORT, 16},
398         { OUTER_L4_RSV, 32},
399         { OUTER_TUN_VNI, 24},
400         { OUTER_TUN_FLOW_ID, 8},
401         { INNER_DST_MAC, 48},
402         { INNER_SRC_MAC, 48},
403         { INNER_VLAN_TAG_FST, 16},
404         { INNER_VLAN_TAG_SEC, 16},
405         { INNER_ETH_TYPE, 16},
406         { INNER_L2_RSV, 16},
407         { INNER_IP_TOS, 8},
408         { INNER_IP_PROTO, 8},
409         { INNER_SRC_IP, 32},
410         { INNER_DST_IP, 32},
411         { INNER_L3_RSV, 16},
412         { INNER_SRC_PORT, 16},
413         { INNER_DST_PORT, 16},
414         { INNER_L4_RSV, 32},
415 };
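/* Each key_info entry above pairs a flow tuple field with its key width in
 * bits (e.g. a MAC address occupies 48 bits, a VLAN tag 16 bits).
 */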
416
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420
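        /* hdev->mac_stats is walked below as a flat array of u64 counters,
         * so its layout must match the order the firmware returns them in
         * (see g_mac_stats_string for the named fields).
         */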
421         u64 *data = (u64 *)(&hdev->mac_stats);
422         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423         __le64 *desc_data;
424         int i, k, n;
425         int ret;
426
427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429         if (ret) {
430                 dev_err(&hdev->pdev->dev,
431                         "Get MAC pkt stats fail, status = %d.\n", ret);
432
433                 return ret;
434         }
435
436         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437                 /* for special opcode 0032, only the first desc has the head */
438                 if (unlikely(i == 0)) {
439                         desc_data = (__le64 *)(&desc[i].data[0]);
440                         n = HCLGE_RD_FIRST_STATS_NUM;
441                 } else {
442                         desc_data = (__le64 *)(&desc[i]);
443                         n = HCLGE_RD_OTHER_STATS_NUM;
444                 }
445
446                 for (k = 0; k < n; k++) {
447                         *data += le64_to_cpu(*desc_data);
448                         data++;
449                         desc_data++;
450                 }
451         }
452
453         return 0;
454 }
455
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458         u64 *data = (u64 *)(&hdev->mac_stats);
459         struct hclge_desc *desc;
460         __le64 *desc_data;
461         u16 i, k, n;
462         int ret;
463
464         /* This may be called inside atomic sections,
465          * so GFP_ATOMIC is more suitable here
466          */
467         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468         if (!desc)
469                 return -ENOMEM;
470
471         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473         if (ret) {
474                 kfree(desc);
475                 return ret;
476         }
477
478         for (i = 0; i < desc_num; i++) {
479                 /* for special opcode 0034, only the first desc has the head */
480                 if (i == 0) {
481                         desc_data = (__le64 *)(&desc[i].data[0]);
482                         n = HCLGE_RD_FIRST_STATS_NUM;
483                 } else {
484                         desc_data = (__le64 *)(&desc[i]);
485                         n = HCLGE_RD_OTHER_STATS_NUM;
486                 }
487
488                 for (k = 0; k < n; k++) {
489                         *data += le64_to_cpu(*desc_data);
490                         data++;
491                         desc_data++;
492                 }
493         }
494
495         kfree(desc);
496
497         return 0;
498 }
499
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502         struct hclge_desc desc;
503         __le32 *desc_data;
504         u32 reg_num;
505         int ret;
506
507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509         if (ret)
510                 return ret;
511
512         desc_data = (__le32 *)(&desc.data[0]);
513         reg_num = le32_to_cpu(*desc_data);
514
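        /* The descriptor count below works out to 1 + DIV_ROUND_UP(reg_num - 3, 4):
         * e.g. reg_num = 12 gives 1 + 2 + 1 = 4 descriptors. (Illustrative
         * reading of the arithmetic; the exact register-to-descriptor packing
         * is defined by the firmware command.)
         */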
515         *desc_num = 1 + ((reg_num - 3) >> 2) +
516                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518         return 0;
519 }
520
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523         u32 desc_num;
524         int ret;
525
526         ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528         /* The firmware supports the new statistics acquisition method */
529         if (!ret)
530                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531         else if (ret == -EOPNOTSUPP)
532                 ret = hclge_mac_update_stats_defective(hdev);
533         else
534                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536         return ret;
537 }
538
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542         struct hclge_vport *vport = hclge_get_vport(handle);
543         struct hclge_dev *hdev = vport->back;
544         struct hnae3_queue *queue;
545         struct hclge_desc desc[1];
546         struct hclge_tqp *tqp;
547         int ret, i;
548
549         for (i = 0; i < kinfo->num_tqps; i++) {
550                 queue = handle->kinfo.tqp[i];
551                 tqp = container_of(queue, struct hclge_tqp, q);
552                 /* command : HCLGE_OPC_QUERY_RX_STATUS */
553                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554                                            true);
555
556                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558                 if (ret) {
559                         dev_err(&hdev->pdev->dev,
560                                 "Query tqp stat fail, status = %d,queue = %d\n",
561                                 ret, i);
562                         return ret;
563                 }
564                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565                         le32_to_cpu(desc[0].data[1]);
566         }
567
568         for (i = 0; i < kinfo->num_tqps; i++) {
569                 queue = handle->kinfo.tqp[i];
570                 tqp = container_of(queue, struct hclge_tqp, q);
571                 /* command : HCLGE_OPC_QUERY_TX_STATUS */
572                 hclge_cmd_setup_basic_desc(&desc[0],
573                                            HCLGE_OPC_QUERY_TX_STATUS,
574                                            true);
575
576                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578                 if (ret) {
579                         dev_err(&hdev->pdev->dev,
580                                 "Query tqp stat fail, status = %d,queue = %d\n",
581                                 ret, i);
582                         return ret;
583                 }
584                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585                         le32_to_cpu(desc[0].data[1]);
586         }
587
588         return 0;
589 }
590
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594         struct hclge_tqp *tqp;
595         u64 *buff = data;
596         int i;
597
598         for (i = 0; i < kinfo->num_tqps; i++) {
599                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601         }
602
603         for (i = 0; i < kinfo->num_tqps; i++) {
604                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606         }
607
608         return buff;
609 }
610
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615         /* each tqp has both a TX and an RX queue */
616         return kinfo->num_tqps * (2);
617 }
618
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         u8 *buff = data;
623         int i = 0;
624
625         for (i = 0; i < kinfo->num_tqps; i++) {
626                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627                         struct hclge_tqp, q);
628                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629                          tqp->index);
630                 buff = buff + ETH_GSTRING_LEN;
631         }
632
633         for (i = 0; i < kinfo->num_tqps; i++) {
634                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635                         struct hclge_tqp, q);
636                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637                          tqp->index);
638                 buff = buff + ETH_GSTRING_LEN;
639         }
640
641         return buff;
642 }
643
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645                                  const struct hclge_comm_stats_str strs[],
646                                  int size, u64 *data)
647 {
648         u64 *buf = data;
649         u32 i;
650
651         for (i = 0; i < size; i++)
652                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654         return buf + size;
655 }
656
657 static u8 *hclge_comm_get_strings(u32 stringset,
658                                   const struct hclge_comm_stats_str strs[],
659                                   int size, u8 *data)
660 {
661         char *buff = (char *)data;
662         u32 i;
663
664         if (stringset != ETH_SS_STATS)
665                 return buff;
666
667         for (i = 0; i < size; i++) {
668                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669                 buff = buff + ETH_GSTRING_LEN;
670         }
671
672         return (u8 *)buff;
673 }
674
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677         struct hnae3_handle *handle;
678         int status;
679
680         handle = &hdev->vport[0].nic;
681         if (handle->client) {
682                 status = hclge_tqps_update_stats(handle);
683                 if (status) {
684                         dev_err(&hdev->pdev->dev,
685                                 "Update TQPS stats fail, status = %d.\n",
686                                 status);
687                 }
688         }
689
690         status = hclge_mac_update_stats(hdev);
691         if (status)
692                 dev_err(&hdev->pdev->dev,
693                         "Update MAC stats fail, status = %d.\n", status);
694 }
695
696 static void hclge_update_stats(struct hnae3_handle *handle,
697                                struct net_device_stats *net_stats)
698 {
699         struct hclge_vport *vport = hclge_get_vport(handle);
700         struct hclge_dev *hdev = vport->back;
701         int status;
702
703         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704                 return;
705
706         status = hclge_mac_update_stats(hdev);
707         if (status)
708                 dev_err(&hdev->pdev->dev,
709                         "Update MAC stats fail, status = %d.\n",
710                         status);
711
712         status = hclge_tqps_update_stats(handle);
713         if (status)
714                 dev_err(&hdev->pdev->dev,
715                         "Update TQPS stats fail, status = %d.\n",
716                         status);
717
718         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724                 HNAE3_SUPPORT_PHY_LOOPBACK |\
725                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int count = 0;
731
732         /* Loopback test support rules:
733          * mac: supported only in GE mode
734          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
735          * phy: supported only when a phy device exists on the board
736          */
737         if (stringset == ETH_SS_TEST) {
738                 /* clear loopback bit flags at first */
739                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740                 if (hdev->pdev->revision >= 0x21 ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744                         count += 1;
745                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746                 }
747
748                 count += 2;
749                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752                 if (hdev->hw.mac.phydev) {
753                         count += 1;
754                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755                 }
756
757         } else if (stringset == ETH_SS_STATS) {
758                 count = ARRAY_SIZE(g_mac_stats_string) +
759                         hclge_tqps_get_sset_count(handle, stringset);
760         }
761
762         return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766                               u8 *data)
767 {
768         u8 *p = (char *)data;
769         int size;
770
771         if (stringset == ETH_SS_STATS) {
772                 size = ARRAY_SIZE(g_mac_stats_string);
773                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774                                            size, p);
775                 p = hclge_tqps_get_strings(handle, p);
776         } else if (stringset == ETH_SS_TEST) {
777                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779                                ETH_GSTRING_LEN);
780                         p += ETH_GSTRING_LEN;
781                 }
782                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784                                ETH_GSTRING_LEN);
785                         p += ETH_GSTRING_LEN;
786                 }
787                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788                         memcpy(p,
789                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790                                ETH_GSTRING_LEN);
791                         p += ETH_GSTRING_LEN;
792                 }
793                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795                                ETH_GSTRING_LEN);
796                         p += ETH_GSTRING_LEN;
797                 }
798         }
799 }
800
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803         struct hclge_vport *vport = hclge_get_vport(handle);
804         struct hclge_dev *hdev = vport->back;
805         u64 *p;
806
807         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808                                  ARRAY_SIZE(g_mac_stats_string), data);
809         p = hclge_tqps_get_stats(handle, p);
810 }
811
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813                                struct hns3_mac_stats *mac_stats)
814 {
815         struct hclge_vport *vport = hclge_get_vport(handle);
816         struct hclge_dev *hdev = vport->back;
817
818         hclge_update_stats(handle, NULL);
819
820         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825                                    struct hclge_func_status_cmd *status)
826 {
827         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828                 return -EINVAL;
829
830         /* Set the pf to main pf */
831         if (status->pf_state & HCLGE_PF_STATE_MAIN)
832                 hdev->flag |= HCLGE_FLAG_MAIN;
833         else
834                 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836         return 0;
837 }
838
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT     5
842
843         struct hclge_func_status_cmd *req;
844         struct hclge_desc desc;
845         int timeout = 0;
846         int ret;
847
848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849         req = (struct hclge_func_status_cmd *)desc.data;
850
851         do {
852                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853                 if (ret) {
854                         dev_err(&hdev->pdev->dev,
855                                 "query function status failed %d.\n", ret);
856                         return ret;
857                 }
858
859                 /* Check whether pf reset is done */
860                 if (req->pf_state)
861                         break;
862                 usleep_range(1000, 2000);
863         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865         ret = hclge_parse_func_status(hdev, req);
866
867         return ret;
868 }
869
870 static int hclge_query_pf_resource(struct hclge_dev *hdev)
871 {
872         struct hclge_pf_res_cmd *req;
873         struct hclge_desc desc;
874         int ret;
875
876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
877         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
878         if (ret) {
879                 dev_err(&hdev->pdev->dev,
880                         "query pf resource failed %d.\n", ret);
881                 return ret;
882         }
883
884         req = (struct hclge_pf_res_cmd *)desc.data;
885         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
886         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
887
888         if (req->tx_buf_size)
889                 hdev->tx_buf_size =
890                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
891         else
892                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
893
894         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
895
896         if (req->dv_buf_size)
897                 hdev->dv_buf_size =
898                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
899         else
900                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
901
902         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
903
904         if (hnae3_dev_roce_supported(hdev)) {
905                 hdev->roce_base_msix_offset =
906                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
907                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
908                 hdev->num_roce_msi =
909                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
910                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
911
912                 /* the nic's msix vector count always equals the roce's. */
913                 hdev->num_nic_msi = hdev->num_roce_msi;
914
915                 /* PF should have both NIC vectors and RoCE vectors;
916                  * NIC vectors are queued before RoCE vectors.
917                  */
918                 hdev->num_msi = hdev->num_roce_msi +
919                                 hdev->roce_base_msix_offset;
920         } else {
921                 hdev->num_msi =
922                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
923                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
924
925                 hdev->num_nic_msi = hdev->num_msi;
926         }
927
928         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
929                 dev_err(&hdev->pdev->dev,
930                         "Just %u msi resources, not enough for pf(min:2).\n",
931                         hdev->num_nic_msi);
932                 return -EINVAL;
933         }
934
935         return 0;
936 }
937
938 static int hclge_parse_speed(int speed_cmd, int *speed)
939 {
940         switch (speed_cmd) {
941         case 6:
942                 *speed = HCLGE_MAC_SPEED_10M;
943                 break;
944         case 7:
945                 *speed = HCLGE_MAC_SPEED_100M;
946                 break;
947         case 0:
948                 *speed = HCLGE_MAC_SPEED_1G;
949                 break;
950         case 1:
951                 *speed = HCLGE_MAC_SPEED_10G;
952                 break;
953         case 2:
954                 *speed = HCLGE_MAC_SPEED_25G;
955                 break;
956         case 3:
957                 *speed = HCLGE_MAC_SPEED_40G;
958                 break;
959         case 4:
960                 *speed = HCLGE_MAC_SPEED_50G;
961                 break;
962         case 5:
963                 *speed = HCLGE_MAC_SPEED_100G;
964                 break;
965         default:
966                 return -EINVAL;
967         }
968
969         return 0;
970 }
971
972 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
973 {
974         struct hclge_vport *vport = hclge_get_vport(handle);
975         struct hclge_dev *hdev = vport->back;
976         u32 speed_ability = hdev->hw.mac.speed_ability;
977         u32 speed_bit = 0;
978
979         switch (speed) {
980         case HCLGE_MAC_SPEED_10M:
981                 speed_bit = HCLGE_SUPPORT_10M_BIT;
982                 break;
983         case HCLGE_MAC_SPEED_100M:
984                 speed_bit = HCLGE_SUPPORT_100M_BIT;
985                 break;
986         case HCLGE_MAC_SPEED_1G:
987                 speed_bit = HCLGE_SUPPORT_1G_BIT;
988                 break;
989         case HCLGE_MAC_SPEED_10G:
990                 speed_bit = HCLGE_SUPPORT_10G_BIT;
991                 break;
992         case HCLGE_MAC_SPEED_25G:
993                 speed_bit = HCLGE_SUPPORT_25G_BIT;
994                 break;
995         case HCLGE_MAC_SPEED_40G:
996                 speed_bit = HCLGE_SUPPORT_40G_BIT;
997                 break;
998         case HCLGE_MAC_SPEED_50G:
999                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1000                 break;
1001         case HCLGE_MAC_SPEED_100G:
1002                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1003                 break;
1004         default:
1005                 return -EINVAL;
1006         }
1007
1008         if (speed_bit & speed_ability)
1009                 return 0;
1010
1011         return -EINVAL;
1012 }
1013
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1015 {
1016         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018                                  mac->supported);
1019         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021                                  mac->supported);
1022         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024                                  mac->supported);
1025         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027                                  mac->supported);
1028         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030                                  mac->supported);
1031 }
1032
1033 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1034 {
1035         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1036                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1037                                  mac->supported);
1038         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1043                                  mac->supported);
1044         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1046                                  mac->supported);
1047         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1048                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1049                                  mac->supported);
1050 }
1051
1052 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1053 {
1054         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1059                                  mac->supported);
1060         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1061                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1062                                  mac->supported);
1063         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1064                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1065                                  mac->supported);
1066         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1067                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1068                                  mac->supported);
1069 }
1070
1071 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1072 {
1073         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1080                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1081                                  mac->supported);
1082         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1083                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1084                                  mac->supported);
1085         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1086                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1087                                  mac->supported);
1088         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1089                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1090                                  mac->supported);
1091 }
1092
1093 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1094 {
1095         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1096         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1097
1098         switch (mac->speed) {
1099         case HCLGE_MAC_SPEED_10G:
1100         case HCLGE_MAC_SPEED_40G:
1101                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1102                                  mac->supported);
1103                 mac->fec_ability =
1104                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1105                 break;
1106         case HCLGE_MAC_SPEED_25G:
1107         case HCLGE_MAC_SPEED_50G:
1108                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1109                                  mac->supported);
1110                 mac->fec_ability =
1111                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1112                         BIT(HNAE3_FEC_AUTO);
1113                 break;
1114         case HCLGE_MAC_SPEED_100G:
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1116                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1117                 break;
1118         default:
1119                 mac->fec_ability = 0;
1120                 break;
1121         }
1122 }
1123
1124 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1125                                         u8 speed_ability)
1126 {
1127         struct hclge_mac *mac = &hdev->hw.mac;
1128
1129         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1130                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1131                                  mac->supported);
1132
1133         hclge_convert_setting_sr(mac, speed_ability);
1134         hclge_convert_setting_lr(mac, speed_ability);
1135         hclge_convert_setting_cr(mac, speed_ability);
1136         if (hdev->pdev->revision >= 0x21)
1137                 hclge_convert_setting_fec(mac);
1138
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1140         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1141         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1142 }
1143
1144 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1145                                             u8 speed_ability)
1146 {
1147         struct hclge_mac *mac = &hdev->hw.mac;
1148
1149         hclge_convert_setting_kr(mac, speed_ability);
1150         if (hdev->pdev->revision >= 0x21)
1151                 hclge_convert_setting_fec(mac);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1153         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1154         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1155 }
1156
1157 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1158                                          u8 speed_ability)
1159 {
1160         unsigned long *supported = hdev->hw.mac.supported;
1161
1162         /* default to supporting all speeds for a GE port */
1163         if (!speed_ability)
1164                 speed_ability = HCLGE_SUPPORT_GE;
1165
1166         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1167                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1168                                  supported);
1169
1170         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1172                                  supported);
1173                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1174                                  supported);
1175         }
1176
1177         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1178                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1179                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1180         }
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1184         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1185         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1186 }
1187
1188 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1189 {
1190         u8 media_type = hdev->hw.mac.media_type;
1191
1192         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1193                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1195                 hclge_parse_copper_link_mode(hdev, speed_ability);
1196         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1197                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1198 }
1199
1200 static u32 hclge_get_max_speed(u8 speed_ability)
1201 {
1202         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1203                 return HCLGE_MAC_SPEED_100G;
1204
1205         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1206                 return HCLGE_MAC_SPEED_50G;
1207
1208         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1209                 return HCLGE_MAC_SPEED_40G;
1210
1211         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1212                 return HCLGE_MAC_SPEED_25G;
1213
1214         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1215                 return HCLGE_MAC_SPEED_10G;
1216
1217         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1218                 return HCLGE_MAC_SPEED_1G;
1219
1220         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1221                 return HCLGE_MAC_SPEED_100M;
1222
1223         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1224                 return HCLGE_MAC_SPEED_10M;
1225
1226         return HCLGE_MAC_SPEED_1G;
1227 }
1228
1229 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1230 {
1231         struct hclge_cfg_param_cmd *req;
1232         u64 mac_addr_tmp_high;
1233         u64 mac_addr_tmp;
1234         unsigned int i;
1235
1236         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1237
1238         /* get the configuration */
1239         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1240                                               HCLGE_CFG_VMDQ_M,
1241                                               HCLGE_CFG_VMDQ_S);
1242         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1244         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1245                                             HCLGE_CFG_TQP_DESC_N_M,
1246                                             HCLGE_CFG_TQP_DESC_N_S);
1247
1248         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1249                                         HCLGE_CFG_PHY_ADDR_M,
1250                                         HCLGE_CFG_PHY_ADDR_S);
1251         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252                                           HCLGE_CFG_MEDIA_TP_M,
1253                                           HCLGE_CFG_MEDIA_TP_S);
1254         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255                                           HCLGE_CFG_RX_BUF_LEN_M,
1256                                           HCLGE_CFG_RX_BUF_LEN_S);
1257         /* get mac_address */
1258         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1259         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1260                                             HCLGE_CFG_MAC_ADDR_H_M,
1261                                             HCLGE_CFG_MAC_ADDR_H_S);
1262
1263         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
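        /* The two shifts above amount to a shift by 32, placing the upper MAC
         * bytes from param[3] above the 32 bits taken from param[2]; the
         * per-byte loop below then extracts mac_addr[0..5] from the low six
         * bytes. (Illustrative reading of the arithmetic.)
         */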
1264
1265         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1266                                              HCLGE_CFG_DEFAULT_SPEED_M,
1267                                              HCLGE_CFG_DEFAULT_SPEED_S);
1268         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269                                             HCLGE_CFG_RSS_SIZE_M,
1270                                             HCLGE_CFG_RSS_SIZE_S);
1271
1272         for (i = 0; i < ETH_ALEN; i++)
1273                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1274
1275         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1276         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1277
1278         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279                                              HCLGE_CFG_SPEED_ABILITY_M,
1280                                              HCLGE_CFG_SPEED_ABILITY_S);
1281         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1283                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1284         if (!cfg->umv_space)
1285                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1286 }
1287
1288 /* hclge_get_cfg: query the static parameters from flash
1289  * @hdev: pointer to struct hclge_dev
1290  * @hcfg: the config structure to be filled in
1291  */
1292 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1293 {
1294         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1295         struct hclge_cfg_param_cmd *req;
1296         unsigned int i;
1297         int ret;
1298
1299         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1300                 u32 offset = 0;
1301
1302                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1303                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1304                                            true);
1305                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1306                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1307         /* Length should be in units of 4 bytes when sent to hardware */
1308                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1309                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1310                 req->offset = cpu_to_le32(offset);
1311         }
1312
1313         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1314         if (ret) {
1315                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1316                 return ret;
1317         }
1318
1319         hclge_parse_cfg(hcfg, desc);
1320
1321         return 0;
1322 }
1323
1324 static int hclge_get_cap(struct hclge_dev *hdev)
1325 {
1326         int ret;
1327
1328         ret = hclge_query_function_status(hdev);
1329         if (ret) {
1330                 dev_err(&hdev->pdev->dev,
1331                         "query function status error %d.\n", ret);
1332                 return ret;
1333         }
1334
1335         /* get pf resource */
1336         ret = hclge_query_pf_resource(hdev);
1337         if (ret)
1338                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1339
1340         return ret;
1341 }
1342
1343 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1344 {
1345 #define HCLGE_MIN_TX_DESC       64
1346 #define HCLGE_MIN_RX_DESC       64
1347
1348         if (!is_kdump_kernel())
1349                 return;
1350
1351         dev_info(&hdev->pdev->dev,
1352                  "Running kdump kernel. Using minimal resources\n");
1353
1354         /* minimum number of queue pairs equals the number of vports */
1355         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1356         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1357         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1358 }
1359
1360 static int hclge_configure(struct hclge_dev *hdev)
1361 {
1362         struct hclge_cfg cfg;
1363         unsigned int i;
1364         int ret;
1365
1366         ret = hclge_get_cfg(hdev, &cfg);
1367         if (ret) {
1368                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1369                 return ret;
1370         }
1371
1372         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1373         hdev->base_tqp_pid = 0;
1374         hdev->rss_size_max = cfg.rss_size_max;
1375         hdev->rx_buf_len = cfg.rx_buf_len;
1376         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1377         hdev->hw.mac.media_type = cfg.media_type;
1378         hdev->hw.mac.phy_addr = cfg.phy_addr;
1379         hdev->num_tx_desc = cfg.tqp_desc_num;
1380         hdev->num_rx_desc = cfg.tqp_desc_num;
1381         hdev->tm_info.num_pg = 1;
1382         hdev->tc_max = cfg.tc_num;
1383         hdev->tm_info.hw_pfc_map = 0;
1384         hdev->wanted_umv_size = cfg.umv_space;
1385
1386         if (hnae3_dev_fd_supported(hdev)) {
1387                 hdev->fd_en = true;
1388                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1389         }
1390
1391         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1392         if (ret) {
1393                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1394                 return ret;
1395         }
1396
1397         hclge_parse_link_mode(hdev, cfg.speed_ability);
1398
1399         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1400
1401         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1402             (hdev->tc_max < 1)) {
1403                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1404                          hdev->tc_max);
1405                 hdev->tc_max = 1;
1406         }
1407
1408         /* Dev does not support DCB */
1409         if (!hnae3_dev_dcb_supported(hdev)) {
1410                 hdev->tc_max = 1;
1411                 hdev->pfc_max = 0;
1412         } else {
1413                 hdev->pfc_max = hdev->tc_max;
1414         }
1415
1416         hdev->tm_info.num_tc = 1;
1417
1418         /* Currently does not support discontinuous tc */
1419         for (i = 0; i < hdev->tm_info.num_tc; i++)
1420                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1421
1422         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1423
1424         hclge_init_kdump_kernel_config(hdev);
1425
1426         /* Set the initial affinity based on the PCI function number */
1427         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1428         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1429         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1430                         &hdev->affinity_mask);
1431
1432         return ret;
1433 }
1434
1435 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1436                             unsigned int tso_mss_max)
1437 {
1438         struct hclge_cfg_tso_status_cmd *req;
1439         struct hclge_desc desc;
1440         u16 tso_mss;
1441
1442         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1443
1444         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1445
1446         tso_mss = 0;
1447         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1448                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1449         req->tso_mss_min = cpu_to_le16(tso_mss);
1450
1451         tso_mss = 0;
1452         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1453                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1454         req->tso_mss_max = cpu_to_le16(tso_mss);
1455
1456         return hclge_cmd_send(&hdev->hw, &desc, 1);
1457 }
1458
1459 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1460 {
1461         struct hclge_cfg_gro_status_cmd *req;
1462         struct hclge_desc desc;
1463         int ret;
1464
1465         if (!hnae3_dev_gro_supported(hdev))
1466                 return 0;
1467
1468         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1469         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1470
1471         req->gro_en = cpu_to_le16(en ? 1 : 0);
1472
1473         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1474         if (ret)
1475                 dev_err(&hdev->pdev->dev,
1476                         "GRO hardware config cmd failed, ret = %d\n", ret);
1477
1478         return ret;
1479 }
1480
1481 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1482 {
1483         struct hclge_tqp *tqp;
1484         int i;
1485
1486         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1487                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1488         if (!hdev->htqp)
1489                 return -ENOMEM;
1490
1491         tqp = hdev->htqp;
1492
1493         for (i = 0; i < hdev->num_tqps; i++) {
1494                 tqp->dev = &hdev->pdev->dev;
1495                 tqp->index = i;
1496
1497                 tqp->q.ae_algo = &ae_algo;
1498                 tqp->q.buf_size = hdev->rx_buf_len;
1499                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1500                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1501                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1502                         i * HCLGE_TQP_REG_SIZE;
1503
1504                 tqp++;
1505         }
1506
1507         return 0;
1508 }
1509
1510 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1511                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1512 {
1513         struct hclge_tqp_map_cmd *req;
1514         struct hclge_desc desc;
1515         int ret;
1516
1517         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1518
1519         req = (struct hclge_tqp_map_cmd *)desc.data;
1520         req->tqp_id = cpu_to_le16(tqp_pid);
1521         req->tqp_vf = func_id;
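        /* enable the mapping; queues mapped to a VF (not the PF) also get
         * the map type bit set
         */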
1522         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1523         if (!is_pf)
1524                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1525         req->tqp_vid = cpu_to_le16(tqp_vid);
1526
1527         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1528         if (ret)
1529                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1530
1531         return ret;
1532 }
1533
1534 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1535 {
1536         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1537         struct hclge_dev *hdev = vport->back;
1538         int i, alloced;
1539
1540         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1541              alloced < num_tqps; i++) {
1542                 if (!hdev->htqp[i].alloced) {
1543                         hdev->htqp[i].q.handle = &vport->nic;
1544                         hdev->htqp[i].q.tqp_index = alloced;
1545                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1546                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1547                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1548                         hdev->htqp[i].alloced = true;
1549                         alloced++;
1550                 }
1551         }
1552         vport->alloc_tqps = alloced;
1553         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1554                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1555
1556         /* ensure one-to-one mapping between irq and queue by default */
1557         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1558                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1559
1560         return 0;
1561 }
1562
1563 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1564                             u16 num_tx_desc, u16 num_rx_desc)
1566 {
1567         struct hnae3_handle *nic = &vport->nic;
1568         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1569         struct hclge_dev *hdev = vport->back;
1570         int ret;
1571
1572         kinfo->num_tx_desc = num_tx_desc;
1573         kinfo->num_rx_desc = num_rx_desc;
1574
1575         kinfo->rx_buf_len = hdev->rx_buf_len;
1576
1577         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1578                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1579         if (!kinfo->tqp)
1580                 return -ENOMEM;
1581
1582         ret = hclge_assign_tqp(vport, num_tqps);
1583         if (ret)
1584                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1585
1586         return ret;
1587 }
1588
1589 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1590                                   struct hclge_vport *vport)
1591 {
1592         struct hnae3_handle *nic = &vport->nic;
1593         struct hnae3_knic_private_info *kinfo;
1594         u16 i;
1595
1596         kinfo = &nic->kinfo;
1597         for (i = 0; i < vport->alloc_tqps; i++) {
1598                 struct hclge_tqp *q =
1599                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1600                 bool is_pf;
1601                 int ret;
1602
1603                 is_pf = !(vport->vport_id);
1604                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1605                                              i, is_pf);
1606                 if (ret)
1607                         return ret;
1608         }
1609
1610         return 0;
1611 }
1612
1613 static int hclge_map_tqp(struct hclge_dev *hdev)
1614 {
1615         struct hclge_vport *vport = hdev->vport;
1616         u16 i, num_vport;
1617
1618         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1619         for (i = 0; i < num_vport; i++) {
1620                 int ret;
1621
1622                 ret = hclge_map_tqp_to_vport(hdev, vport);
1623                 if (ret)
1624                         return ret;
1625
1626                 vport++;
1627         }
1628
1629         return 0;
1630 }
1631
1632 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1633 {
1634         struct hnae3_handle *nic = &vport->nic;
1635         struct hclge_dev *hdev = vport->back;
1636         int ret;
1637
1638         nic->pdev = hdev->pdev;
1639         nic->ae_algo = &ae_algo;
1640         nic->numa_node_mask = hdev->numa_node_mask;
1641
1642         ret = hclge_knic_setup(vport, num_tqps,
1643                                hdev->num_tx_desc, hdev->num_rx_desc);
1644         if (ret)
1645                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1646
1647         return ret;
1648 }
1649
1650 static int hclge_alloc_vport(struct hclge_dev *hdev)
1651 {
1652         struct pci_dev *pdev = hdev->pdev;
1653         struct hclge_vport *vport;
1654         u32 tqp_main_vport;
1655         u32 tqp_per_vport;
1656         int num_vport, i;
1657         int ret;
1658
1659         /* We need to alloc a vport for the main NIC of the PF */
1660         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1661
1662         if (hdev->num_tqps < num_vport) {
1663                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1664                         hdev->num_tqps, num_vport);
1665                 return -EINVAL;
1666         }
1667
1668         /* Alloc the same number of TQPs for every vport */
1669         tqp_per_vport = hdev->num_tqps / num_vport;
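        /* the main (PF) vport additionally takes any remainder queues */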
1670         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1671
1672         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1673                              GFP_KERNEL);
1674         if (!vport)
1675                 return -ENOMEM;
1676
1677         hdev->vport = vport;
1678         hdev->num_alloc_vport = num_vport;
1679
1680         if (IS_ENABLED(CONFIG_PCI_IOV))
1681                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1682
1683         for (i = 0; i < num_vport; i++) {
1684                 vport->back = hdev;
1685                 vport->vport_id = i;
1686                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1687                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1688                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1689                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1690                 INIT_LIST_HEAD(&vport->vlan_list);
1691                 INIT_LIST_HEAD(&vport->uc_mac_list);
1692                 INIT_LIST_HEAD(&vport->mc_mac_list);
1693
1694                 if (i == 0)
1695                         ret = hclge_vport_setup(vport, tqp_main_vport);
1696                 else
1697                         ret = hclge_vport_setup(vport, tqp_per_vport);
1698                 if (ret) {
1699                         dev_err(&pdev->dev,
1700                                 "vport setup failed for vport %d, %d\n",
1701                                 i, ret);
1702                         return ret;
1703                 }
1704
1705                 vport++;
1706         }
1707
1708         return 0;
1709 }
1710
1711 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1712                                     struct hclge_pkt_buf_alloc *buf_alloc)
1713 {
1714 /* TX buffer size is in units of 128 bytes */
1715 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1716 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1717         struct hclge_tx_buff_alloc_cmd *req;
1718         struct hclge_desc desc;
1719         int ret;
1720         u8 i;
1721
1722         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1723
1724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1725         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1727
1728                 req->tx_pkt_buff[i] =
1729                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1730                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1731         }
1732
1733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1734         if (ret)
1735                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1736                         ret);
1737
1738         return ret;
1739 }
1740
1741 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1742                                  struct hclge_pkt_buf_alloc *buf_alloc)
1743 {
1744         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1745
1746         if (ret)
1747                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1748
1749         return ret;
1750 }
1751
1752 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1753 {
1754         unsigned int i;
1755         u32 cnt = 0;
1756
1757         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1758                 if (hdev->hw_tc_map & BIT(i))
1759                         cnt++;
1760         return cnt;
1761 }
1762
1763 /* Get the number of pfc-enabled TCs that have a private buffer */
1764 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1765                                   struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767         struct hclge_priv_buf *priv;
1768         unsigned int i;
1769         int cnt = 0;
1770
1771         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772                 priv = &buf_alloc->priv_buf[i];
1773                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1774                     priv->enable)
1775                         cnt++;
1776         }
1777
1778         return cnt;
1779 }
1780
1781 /* Get the number of pfc-disabled TCs that have a private buffer */
1782 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1783                                      struct hclge_pkt_buf_alloc *buf_alloc)
1784 {
1785         struct hclge_priv_buf *priv;
1786         unsigned int i;
1787         int cnt = 0;
1788
1789         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1790                 priv = &buf_alloc->priv_buf[i];
1791                 if (hdev->hw_tc_map & BIT(i) &&
1792                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1793                     priv->enable)
1794                         cnt++;
1795         }
1796
1797         return cnt;
1798 }
1799
1800 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1801 {
1802         struct hclge_priv_buf *priv;
1803         u32 rx_priv = 0;
1804         int i;
1805
1806         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1807                 priv = &buf_alloc->priv_buf[i];
1808                 if (priv->enable)
1809                         rx_priv += priv->buf_size;
1810         }
1811         return rx_priv;
1812 }
1813
1814 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1815 {
1816         u32 i, total_tx_size = 0;
1817
1818         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1819                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1820
1821         return total_tx_size;
1822 }
1823
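/* Check whether the rx buffer left after the private allocation (rx_all
 * minus the per-TC private buffers) can hold the required shared buffer;
 * if it can, fill in the shared buffer size and its per-TC thresholds.
 */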
1824 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1825                                 struct hclge_pkt_buf_alloc *buf_alloc,
1826                                 u32 rx_all)
1827 {
1828         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1829         u32 tc_num = hclge_get_tc_num(hdev);
1830         u32 shared_buf, aligned_mps;
1831         u32 rx_priv;
1832         int i;
1833
1834         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1835
1836         if (hnae3_dev_dcb_supported(hdev))
1837                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1838                                         hdev->dv_buf_size;
1839         else
1840                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1841                                         + hdev->dv_buf_size;
1842
1843         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1844         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1845                              HCLGE_BUF_SIZE_UNIT);
1846
1847         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1848         if (rx_all < rx_priv + shared_std)
1849                 return false;
1850
1851         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1852         buf_alloc->s_buf.buf_size = shared_buf;
1853         if (hnae3_dev_dcb_supported(hdev)) {
1854                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1855                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1856                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1857                                   HCLGE_BUF_SIZE_UNIT);
1858         } else {
1859                 buf_alloc->s_buf.self.high = aligned_mps +
1860                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1861                 buf_alloc->s_buf.self.low = aligned_mps;
1862         }
1863
1864         if (hnae3_dev_dcb_supported(hdev)) {
1865                 hi_thrd = shared_buf - hdev->dv_buf_size;
1866
1867                 if (tc_num <= NEED_RESERVE_TC_NUM)
1868                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1869                                         / BUF_MAX_PERCENT;
1870
1871                 if (tc_num)
1872                         hi_thrd = hi_thrd / tc_num;
1873
1874                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1875                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1876                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1877         } else {
1878                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1879                 lo_thrd = aligned_mps;
1880         }
1881
1882         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1884                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1885         }
1886
1887         return true;
1888 }
1889
1890 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1891                                 struct hclge_pkt_buf_alloc *buf_alloc)
1892 {
1893         u32 i, total_size;
1894
1895         total_size = hdev->pkt_buf_size;
1896
1897         /* alloc tx buffer for all enabled tc */
1898         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1900
1901                 if (hdev->hw_tc_map & BIT(i)) {
1902                         if (total_size < hdev->tx_buf_size)
1903                                 return -ENOMEM;
1904
1905                         priv->tx_buf_size = hdev->tx_buf_size;
1906                 } else {
1907                         priv->tx_buf_size = 0;
1908                 }
1909
1910                 total_size -= priv->tx_buf_size;
1911         }
1912
1913         return 0;
1914 }
1915
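/* Assign waterlines and private buffer sizes for every enabled TC, using
 * the larger ("max") or the smaller waterline scheme, then check whether
 * the result still fits in the remaining rx packet buffer.
 */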
1916 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1917                                   struct hclge_pkt_buf_alloc *buf_alloc)
1918 {
1919         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1920         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1921         unsigned int i;
1922
1923         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1924                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1925
1926                 priv->enable = 0;
1927                 priv->wl.low = 0;
1928                 priv->wl.high = 0;
1929                 priv->buf_size = 0;
1930
1931                 if (!(hdev->hw_tc_map & BIT(i)))
1932                         continue;
1933
1934                 priv->enable = 1;
1935
1936                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1937                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1938                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1939                                                 HCLGE_BUF_SIZE_UNIT);
1940                 } else {
1941                         priv->wl.low = 0;
1942                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1943                                         aligned_mps;
1944                 }
1945
1946                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1947         }
1948
1949         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1950 }
1951
1952 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1953                                           struct hclge_pkt_buf_alloc *buf_alloc)
1954 {
1955         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1956         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1957         int i;
1958
1959         /* let the last one be cleared first */
1960         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1961                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1962                 unsigned int mask = BIT((unsigned int)i);
1963
1964                 if (hdev->hw_tc_map & mask &&
1965                     !(hdev->tm_info.hw_pfc_map & mask)) {
1966                         /* Clear the private buffer of this non-pfc TC */
1967                         priv->wl.low = 0;
1968                         priv->wl.high = 0;
1969                         priv->buf_size = 0;
1970                         priv->enable = 0;
1971                         no_pfc_priv_num--;
1972                 }
1973
1974                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1975                     no_pfc_priv_num == 0)
1976                         break;
1977         }
1978
1979         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1980 }
1981
1982 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1983                                         struct hclge_pkt_buf_alloc *buf_alloc)
1984 {
1985         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1986         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1987         int i;
1988
1989         /* let the last one be cleared first */
1990         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1991                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1992                 unsigned int mask = BIT((unsigned int)i);
1993
1994                 if (hdev->hw_tc_map & mask &&
1995                     hdev->tm_info.hw_pfc_map & mask) {
1996                         /* Reduce the number of pfc TCs with a private buffer */
1997                         priv->wl.low = 0;
1998                         priv->enable = 0;
1999                         priv->wl.high = 0;
2000                         priv->buf_size = 0;
2001                         pfc_priv_num--;
2002                 }
2003
2004                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2005                     pfc_priv_num == 0)
2006                         break;
2007         }
2008
2009         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2010 }
2011
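/* Try to hand the whole remaining rx buffer to the enabled TCs as private
 * buffers with no shared buffer; this only succeeds when every enabled TC
 * can get at least the minimum private buffer size.
 */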
2012 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2013                                       struct hclge_pkt_buf_alloc *buf_alloc)
2014 {
2015 #define COMPENSATE_BUFFER       0x3C00
2016 #define COMPENSATE_HALF_MPS_NUM 5
2017 #define PRIV_WL_GAP             0x1800
2018
2019         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2020         u32 tc_num = hclge_get_tc_num(hdev);
2021         u32 half_mps = hdev->mps >> 1;
2022         u32 min_rx_priv;
2023         unsigned int i;
2024
2025         if (tc_num)
2026                 rx_priv = rx_priv / tc_num;
2027
2028         if (tc_num <= NEED_RESERVE_TC_NUM)
2029                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2030
2031         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2032                         COMPENSATE_HALF_MPS_NUM * half_mps;
2033         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2034         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2035
2036         if (rx_priv < min_rx_priv)
2037                 return false;
2038
2039         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2041
2042                 priv->enable = 0;
2043                 priv->wl.low = 0;
2044                 priv->wl.high = 0;
2045                 priv->buf_size = 0;
2046
2047                 if (!(hdev->hw_tc_map & BIT(i)))
2048                         continue;
2049
2050                 priv->enable = 1;
2051                 priv->buf_size = rx_priv;
2052                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2053                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2054         }
2055
2056         buf_alloc->s_buf.buf_size = 0;
2057
2058         return true;
2059 }
2060
2061 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2062  * @hdev: pointer to struct hclge_dev
2063  * @buf_alloc: pointer to buffer calculation data
2064  * @return: 0: calculation successful, negative: fail
2065  */
2066 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2067                                 struct hclge_pkt_buf_alloc *buf_alloc)
2068 {
2069         /* When DCB is not supported, rx private buffer is not allocated. */
2070         if (!hnae3_dev_dcb_supported(hdev)) {
2071                 u32 rx_all = hdev->pkt_buf_size;
2072
2073                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2074                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2075                         return -ENOMEM;
2076
2077                 return 0;
2078         }
2079
2080         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2081                 return 0;
2082
2083         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2084                 return 0;
2085
2086         /* try to decrease the buffer size */
2087         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2088                 return 0;
2089
2090         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2091                 return 0;
2092
2093         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2094                 return 0;
2095
2096         return -ENOMEM;
2097 }
2098
2099 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2100                                    struct hclge_pkt_buf_alloc *buf_alloc)
2101 {
2102         struct hclge_rx_priv_buff_cmd *req;
2103         struct hclge_desc desc;
2104         int ret;
2105         int i;
2106
2107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2108         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2109
2110         /* Alloc private buffer TCs */
2111         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2112                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2113
2114                 req->buf_num[i] =
2115                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2116                 req->buf_num[i] |=
2117                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2118         }
2119
2120         req->shared_buf =
2121                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2122                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2123
2124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2125         if (ret)
2126                 dev_err(&hdev->pdev->dev,
2127                         "rx private buffer alloc cmd failed %d\n", ret);
2128
2129         return ret;
2130 }
2131
2132 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2133                                    struct hclge_pkt_buf_alloc *buf_alloc)
2134 {
2135         struct hclge_rx_priv_wl_buf *req;
2136         struct hclge_priv_buf *priv;
2137         struct hclge_desc desc[2];
2138         int i, j;
2139         int ret;
2140
2141         for (i = 0; i < 2; i++) {
2142                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2143                                            false);
2144                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2145
2146                 /* The first descriptor sets the NEXT bit to 1 */
2147                 if (i == 0)
2148                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149                 else
2150                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2151
2152                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2153                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2154
2155                         priv = &buf_alloc->priv_buf[idx];
2156                         req->tc_wl[j].high =
2157                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2158                         req->tc_wl[j].high |=
2159                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2160                         req->tc_wl[j].low =
2161                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2162                         req->tc_wl[j].low |=
2163                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2164                 }
2165         }
2166
2167         /* Send 2 descriptors at one time */
2168         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2169         if (ret)
2170                 dev_err(&hdev->pdev->dev,
2171                         "rx private waterline config cmd failed %d\n",
2172                         ret);
2173         return ret;
2174 }
2175
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177                                     struct hclge_pkt_buf_alloc *buf_alloc)
2178 {
2179         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180         struct hclge_rx_com_thrd *req;
2181         struct hclge_desc desc[2];
2182         struct hclge_tc_thrd *tc;
2183         int i, j;
2184         int ret;
2185
2186         for (i = 0; i < 2; i++) {
2187                 hclge_cmd_setup_basic_desc(&desc[i],
2188                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2190
2191                 /* The first descriptor sets the NEXT bit to 1 */
2192                 if (i == 0)
2193                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194                 else
2195                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2196
2197                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2199
2200                         req->com_thrd[j].high =
2201                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202                         req->com_thrd[j].high |=
2203                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204                         req->com_thrd[j].low =
2205                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206                         req->com_thrd[j].low |=
2207                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2208                 }
2209         }
2210
2211         /* Send 2 descriptors at one time */
2212         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2213         if (ret)
2214                 dev_err(&hdev->pdev->dev,
2215                         "common threshold config cmd failed %d\n", ret);
2216         return ret;
2217 }
2218
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220                                   struct hclge_pkt_buf_alloc *buf_alloc)
2221 {
2222         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223         struct hclge_rx_com_wl *req;
2224         struct hclge_desc desc;
2225         int ret;
2226
2227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2228
2229         req = (struct hclge_rx_com_wl *)desc.data;
2230         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232
2233         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2235
2236         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2237         if (ret)
2238                 dev_err(&hdev->pdev->dev,
2239                         "common waterline config cmd failed %d\n", ret);
2240
2241         return ret;
2242 }
2243
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2245 {
2246         struct hclge_pkt_buf_alloc *pkt_buf;
2247         int ret;
2248
2249         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2250         if (!pkt_buf)
2251                 return -ENOMEM;
2252
2253         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2254         if (ret) {
2255                 dev_err(&hdev->pdev->dev,
2256                         "could not calc tx buffer size for all TCs %d\n", ret);
2257                 goto out;
2258         }
2259
2260         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2261         if (ret) {
2262                 dev_err(&hdev->pdev->dev,
2263                         "could not alloc tx buffers %d\n", ret);
2264                 goto out;
2265         }
2266
2267         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2268         if (ret) {
2269                 dev_err(&hdev->pdev->dev,
2270                         "could not calc rx priv buffer size for all TCs %d\n",
2271                         ret);
2272                 goto out;
2273         }
2274
2275         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2276         if (ret) {
2277                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2278                         ret);
2279                 goto out;
2280         }
2281
2282         if (hnae3_dev_dcb_supported(hdev)) {
2283                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2284                 if (ret) {
2285                         dev_err(&hdev->pdev->dev,
2286                                 "could not configure rx private waterline %d\n",
2287                                 ret);
2288                         goto out;
2289                 }
2290
2291                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2292                 if (ret) {
2293                         dev_err(&hdev->pdev->dev,
2294                                 "could not configure common threshold %d\n",
2295                                 ret);
2296                         goto out;
2297                 }
2298         }
2299
2300         ret = hclge_common_wl_config(hdev, pkt_buf);
2301         if (ret)
2302                 dev_err(&hdev->pdev->dev,
2303                         "could not configure common waterline %d\n", ret);
2304
2305 out:
2306         kfree(pkt_buf);
2307         return ret;
2308 }
2309
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2311 {
2312         struct hnae3_handle *roce = &vport->roce;
2313         struct hnae3_handle *nic = &vport->nic;
2314
2315         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2316
2317         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318             vport->back->num_msi_left == 0)
2319                 return -EINVAL;
2320
2321         roce->rinfo.base_vector = vport->back->roce_base_vector;
2322
2323         roce->rinfo.netdev = nic->kinfo.netdev;
2324         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2325
2326         roce->pdev = nic->pdev;
2327         roce->ae_algo = nic->ae_algo;
2328         roce->numa_node_mask = nic->numa_node_mask;
2329
2330         return 0;
2331 }
2332
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2334 {
2335         struct pci_dev *pdev = hdev->pdev;
2336         int vectors;
2337         int i;
2338
2339         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2340                                         hdev->num_msi,
2341                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2342         if (vectors < 0) {
2343                 dev_err(&pdev->dev,
2344                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2345                         vectors);
2346                 return vectors;
2347         }
2348         if (vectors < hdev->num_msi)
2349                 dev_warn(&hdev->pdev->dev,
2350                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351                          hdev->num_msi, vectors);
2352
2353         hdev->num_msi = vectors;
2354         hdev->num_msi_left = vectors;
2355
2356         hdev->base_msi_vector = pdev->irq;
2357         hdev->roce_base_vector = hdev->base_msi_vector +
2358                                 hdev->roce_base_msix_offset;
2359
2360         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361                                            sizeof(u16), GFP_KERNEL);
2362         if (!hdev->vector_status) {
2363                 pci_free_irq_vectors(pdev);
2364                 return -ENOMEM;
2365         }
2366
2367         for (i = 0; i < hdev->num_msi; i++)
2368                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2369
2370         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371                                         sizeof(int), GFP_KERNEL);
2372         if (!hdev->vector_irq) {
2373                 pci_free_irq_vectors(pdev);
2374                 return -ENOMEM;
2375         }
2376
2377         return 0;
2378 }
2379
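/* only 10M and 100M links may run half duplex; any other speed is forced
 * to full duplex
 */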
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2381 {
2382         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383                 duplex = HCLGE_MAC_FULL;
2384
2385         return duplex;
2386 }
2387
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2389                                       u8 duplex)
2390 {
2391         struct hclge_config_mac_speed_dup_cmd *req;
2392         struct hclge_desc desc;
2393         int ret;
2394
2395         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2396
2397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2398
2399         if (duplex)
2400                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2401
2402         switch (speed) {
2403         case HCLGE_MAC_SPEED_10M:
2404                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405                                 HCLGE_CFG_SPEED_S, 6);
2406                 break;
2407         case HCLGE_MAC_SPEED_100M:
2408                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409                                 HCLGE_CFG_SPEED_S, 7);
2410                 break;
2411         case HCLGE_MAC_SPEED_1G:
2412                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413                                 HCLGE_CFG_SPEED_S, 0);
2414                 break;
2415         case HCLGE_MAC_SPEED_10G:
2416                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417                                 HCLGE_CFG_SPEED_S, 1);
2418                 break;
2419         case HCLGE_MAC_SPEED_25G:
2420                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421                                 HCLGE_CFG_SPEED_S, 2);
2422                 break;
2423         case HCLGE_MAC_SPEED_40G:
2424                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425                                 HCLGE_CFG_SPEED_S, 3);
2426                 break;
2427         case HCLGE_MAC_SPEED_50G:
2428                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429                                 HCLGE_CFG_SPEED_S, 4);
2430                 break;
2431         case HCLGE_MAC_SPEED_100G:
2432                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433                                 HCLGE_CFG_SPEED_S, 5);
2434                 break;
2435         default:
2436                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2437                 return -EINVAL;
2438         }
2439
2440         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2441                       1);
2442
2443         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2444         if (ret) {
2445                 dev_err(&hdev->pdev->dev,
2446                         "mac speed/duplex config cmd failed %d.\n", ret);
2447                 return ret;
2448         }
2449
2450         return 0;
2451 }
2452
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2454 {
2455         int ret;
2456
2457         duplex = hclge_check_speed_dup(duplex, speed);
2458         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2459                 return 0;
2460
2461         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2462         if (ret)
2463                 return ret;
2464
2465         hdev->hw.mac.speed = speed;
2466         hdev->hw.mac.duplex = duplex;
2467
2468         return 0;
2469 }
2470
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2472                                      u8 duplex)
2473 {
2474         struct hclge_vport *vport = hclge_get_vport(handle);
2475         struct hclge_dev *hdev = vport->back;
2476
2477         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2478 }
2479
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2481 {
2482         struct hclge_config_auto_neg_cmd *req;
2483         struct hclge_desc desc;
2484         u32 flag = 0;
2485         int ret;
2486
2487         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2488
2489         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2490         if (enable)
2491                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2493
2494         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2495         if (ret)
2496                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2497                         ret);
2498
2499         return ret;
2500 }
2501
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2503 {
2504         struct hclge_vport *vport = hclge_get_vport(handle);
2505         struct hclge_dev *hdev = vport->back;
2506
2507         if (!hdev->hw.mac.support_autoneg) {
2508                 if (enable) {
2509                         dev_err(&hdev->pdev->dev,
2510                                 "autoneg is not supported by current port\n");
2511                         return -EOPNOTSUPP;
2512                 } else {
2513                         return 0;
2514                 }
2515         }
2516
2517         return hclge_set_autoneg_en(hdev, enable);
2518 }
2519
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2521 {
2522         struct hclge_vport *vport = hclge_get_vport(handle);
2523         struct hclge_dev *hdev = vport->back;
2524         struct phy_device *phydev = hdev->hw.mac.phydev;
2525
2526         if (phydev)
2527                 return phydev->autoneg;
2528
2529         return hdev->hw.mac.autoneg;
2530 }
2531
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2533 {
2534         struct hclge_vport *vport = hclge_get_vport(handle);
2535         struct hclge_dev *hdev = vport->back;
2536         int ret;
2537
2538         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2539
2540         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541         if (ret)
2542                 return ret;
2543         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544 }
2545
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2547 {
2548         struct hclge_vport *vport = hclge_get_vport(handle);
2549         struct hclge_dev *hdev = vport->back;
2550
2551         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552                 return hclge_set_autoneg_en(hdev, !halt);
2553
2554         return 0;
2555 }
2556
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2558 {
2559         struct hclge_config_fec_cmd *req;
2560         struct hclge_desc desc;
2561         int ret;
2562
2563         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2564
2565         req = (struct hclge_config_fec_cmd *)desc.data;
2566         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568         if (fec_mode & BIT(HNAE3_FEC_RS))
2569                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571         if (fec_mode & BIT(HNAE3_FEC_BASER))
2572                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2574
2575         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576         if (ret)
2577                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2578
2579         return ret;
2580 }
2581
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2583 {
2584         struct hclge_vport *vport = hclge_get_vport(handle);
2585         struct hclge_dev *hdev = vport->back;
2586         struct hclge_mac *mac = &hdev->hw.mac;
2587         int ret;
2588
2589         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2591                 return -EINVAL;
2592         }
2593
2594         ret = hclge_set_fec_hw(hdev, fec_mode);
2595         if (ret)
2596                 return ret;
2597
2598         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2599         return 0;
2600 }
2601
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603                           u8 *fec_mode)
2604 {
2605         struct hclge_vport *vport = hclge_get_vport(handle);
2606         struct hclge_dev *hdev = vport->back;
2607         struct hclge_mac *mac = &hdev->hw.mac;
2608
2609         if (fec_ability)
2610                 *fec_ability = mac->fec_ability;
2611         if (fec_mode)
2612                 *fec_mode = mac->fec_mode;
2613 }
2614
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2616 {
2617         struct hclge_mac *mac = &hdev->hw.mac;
2618         int ret;
2619
2620         hdev->support_sfp_query = true;
2621         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623                                          hdev->hw.mac.duplex);
2624         if (ret) {
2625                 dev_err(&hdev->pdev->dev,
2626                         "Config mac speed dup fail ret=%d\n", ret);
2627                 return ret;
2628         }
2629
2630         if (hdev->hw.mac.support_autoneg) {
2631                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2632                 if (ret) {
2633                         dev_err(&hdev->pdev->dev,
2634                                 "Config mac autoneg fail ret=%d\n", ret);
2635                         return ret;
2636                 }
2637         }
2638
2639         mac->link = 0;
2640
2641         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2643                 if (ret) {
2644                         dev_err(&hdev->pdev->dev,
2645                                 "Fec mode init fail, ret = %d\n", ret);
2646                         return ret;
2647                 }
2648         }
2649
2650         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2651         if (ret) {
2652                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2653                 return ret;
2654         }
2655
2656         ret = hclge_set_default_loopback(hdev);
2657         if (ret)
2658                 return ret;
2659
2660         ret = hclge_buffer_alloc(hdev);
2661         if (ret)
2662                 dev_err(&hdev->pdev->dev,
2663                         "allocate buffer fail, ret=%d\n", ret);
2664
2665         return ret;
2666 }
2667
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2669 {
2670         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673                                     hclge_wq, &hdev->service_task, 0);
2674 }
2675
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2677 {
2678         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681                                     hclge_wq, &hdev->service_task, 0);
2682 }
2683
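/* (re)arm the delayed service task on the first CPU of the device's
 * affinity mask, unless the device is being removed or a previous reset
 * has failed
 */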
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2685 {
2686         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2687             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2688                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2689                                     hclge_wq, &hdev->service_task,
2690                                     delay_time);
2691 }
2692
2693 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2694 {
2695         struct hclge_link_status_cmd *req;
2696         struct hclge_desc desc;
2697         int link_status;
2698         int ret;
2699
2700         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2701         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2702         if (ret) {
2703                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2704                         ret);
2705                 return ret;
2706         }
2707
2708         req = (struct hclge_link_status_cmd *)desc.data;
2709         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2710
2711         return !!link_status;
2712 }
2713
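/* The port is reported up only when the MAC link is up and, when a PHY is
 * attached, the PHY is running and reports link up as well.
 */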
2714 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2715 {
2716         unsigned int mac_state;
2717         int link_stat;
2718
2719         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2720                 return 0;
2721
2722         mac_state = hclge_get_mac_link_status(hdev);
2723
2724         if (hdev->hw.mac.phydev) {
2725                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2726                         link_stat = mac_state &
2727                                 hdev->hw.mac.phydev->link;
2728                 else
2729                         link_stat = 0;
2730
2731         } else {
2732                 link_stat = mac_state;
2733         }
2734
2735         return !!link_stat;
2736 }
2737
2738 static void hclge_update_link_status(struct hclge_dev *hdev)
2739 {
2740         struct hnae3_client *rclient = hdev->roce_client;
2741         struct hnae3_client *client = hdev->nic_client;
2742         struct hnae3_handle *rhandle;
2743         struct hnae3_handle *handle;
2744         int state;
2745         int i;
2746
2747         if (!client)
2748                 return;
2749
2750         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2751                 return;
2752
2753         state = hclge_get_mac_phy_link(hdev);
2754         if (state != hdev->hw.mac.link) {
2755                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2756                         handle = &hdev->vport[i].nic;
2757                         client->ops->link_status_change(handle, state);
2758                         hclge_config_mac_tnl_int(hdev, state);
2759                         rhandle = &hdev->vport[i].roce;
2760                         if (rclient && rclient->ops->link_status_change)
2761                                 rclient->ops->link_status_change(rhandle,
2762                                                                  state);
2763                 }
2764                 hdev->hw.mac.link = state;
2765         }
2766
2767         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2768 }
2769
2770 static void hclge_update_port_capability(struct hclge_mac *mac)
2771 {
2772         /* update fec ability by speed */
2773         hclge_convert_setting_fec(mac);
2774
2775         /* firmware cannot identify the backplane type; the media type
2776          * read from the configuration can help deal with it
2777          */
2778         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2779             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2780                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2781         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2782                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2783
2784         if (mac->support_autoneg) {
2785                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2786                 linkmode_copy(mac->advertising, mac->supported);
2787         } else {
2788                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2789                                    mac->supported);
2790                 linkmode_zero(mac->advertising);
2791         }
2792 }
2793
2794 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2795 {
2796         struct hclge_sfp_info_cmd *resp;
2797         struct hclge_desc desc;
2798         int ret;
2799
2800         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2801         resp = (struct hclge_sfp_info_cmd *)desc.data;
2802         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2803         if (ret == -EOPNOTSUPP) {
2804                 dev_warn(&hdev->pdev->dev,
2805                          "IMP does not support getting SFP speed %d\n", ret);
2806                 return ret;
2807         } else if (ret) {
2808                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2809                 return ret;
2810         }
2811
2812         *speed = le32_to_cpu(resp->speed);
2813
2814         return 0;
2815 }
2816
2817 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2818 {
2819         struct hclge_sfp_info_cmd *resp;
2820         struct hclge_desc desc;
2821         int ret;
2822
2823         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2824         resp = (struct hclge_sfp_info_cmd *)desc.data;
2825
2826         resp->query_type = QUERY_ACTIVE_SPEED;
2827
2828         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2829         if (ret == -EOPNOTSUPP) {
2830                 dev_warn(&hdev->pdev->dev,
2831                          "IMP does not support getting SFP info %d\n", ret);
2832                 return ret;
2833         } else if (ret) {
2834                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2835                 return ret;
2836         }
2837
2838         mac->speed = le32_to_cpu(resp->speed);
2839         /* if resp->speed_ability is 0, it means the firmware is an old
2840          * version; do not update these params
2841          */
2842         if (resp->speed_ability) {
2843                 mac->module_type = le32_to_cpu(resp->module_type);
2844                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2845                 mac->autoneg = resp->autoneg;
2846                 mac->support_autoneg = resp->autoneg_ability;
2847                 mac->speed_type = QUERY_ACTIVE_SPEED;
2848                 if (!resp->active_fec)
2849                         mac->fec_mode = 0;
2850                 else
2851                         mac->fec_mode = BIT(resp->active_fec);
2852         } else {
2853                 mac->speed_type = QUERY_SFP_SPEED;
2854         }
2855
2856         return 0;
2857 }
2858
2859 static int hclge_update_port_info(struct hclge_dev *hdev)
2860 {
2861         struct hclge_mac *mac = &hdev->hw.mac;
2862         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2863         int ret;
2864
2865         /* get the port info from SFP cmd if not copper port */
2866         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2867                 return 0;
2868
2869         /* if IMP does not support getting SFP/qSFP info, return directly */
2870         if (!hdev->support_sfp_query)
2871                 return 0;
2872
2873         if (hdev->pdev->revision >= 0x21)
2874                 ret = hclge_get_sfp_info(hdev, mac);
2875         else
2876                 ret = hclge_get_sfp_speed(hdev, &speed);
2877
2878         if (ret == -EOPNOTSUPP) {
2879                 hdev->support_sfp_query = false;
2880                 return ret;
2881         } else if (ret) {
2882                 return ret;
2883         }
2884
2885         if (hdev->pdev->revision >= 0x21) {
2886                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2887                         hclge_update_port_capability(mac);
2888                         return 0;
2889                 }
2890                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2891                                                HCLGE_MAC_FULL);
2892         } else {
2893                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2894                         return 0; /* do nothing if no SFP */
2895
2896                 /* must configure full duplex for SFP */
2897                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2898         }
2899 }
2900
2901 static int hclge_get_status(struct hnae3_handle *handle)
2902 {
2903         struct hclge_vport *vport = hclge_get_vport(handle);
2904         struct hclge_dev *hdev = vport->back;
2905
2906         hclge_update_link_status(hdev);
2907
2908         return hdev->hw.mac.link;
2909 }
2910
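/* Map a VF index from the stack (0-based) to its vport structure,
 * validating that SR-IOV is enabled and the index is in range.
 */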
2911 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2912 {
2913         if (pci_num_vf(hdev->pdev) == 0) {
2914                 dev_err(&hdev->pdev->dev,
2915                         "SRIOV is disabled, cannot get vport(%d) info.\n", vf);
2916                 return NULL;
2917         }
2918
2919         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2920                 dev_err(&hdev->pdev->dev,
2921                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2922                         vf, pci_num_vf(hdev->pdev));
2923                 return NULL;
2924         }
2925
2926         /* VFs start from 1 in vport */
2927         vf += HCLGE_VF_VPORT_START_NUM;
2928         return &hdev->vport[vf];
2929 }
2930
2931 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2932                                struct ifla_vf_info *ivf)
2933 {
2934         struct hclge_vport *vport = hclge_get_vport(handle);
2935         struct hclge_dev *hdev = vport->back;
2936
2937         vport = hclge_get_vf_vport(hdev, vf);
2938         if (!vport)
2939                 return -EINVAL;
2940
2941         ivf->vf = vf;
2942         ivf->linkstate = vport->vf_info.link_state;
2943         ivf->spoofchk = vport->vf_info.spoofchk;
2944         ivf->trusted = vport->vf_info.trusted;
2945         ivf->min_tx_rate = 0;
2946         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2947         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2948         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2949         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2950         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2951
2952         return 0;
2953 }
2954
2955 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2956                                    int link_state)
2957 {
2958         struct hclge_vport *vport = hclge_get_vport(handle);
2959         struct hclge_dev *hdev = vport->back;
2960
2961         vport = hclge_get_vf_vport(hdev, vf);
2962         if (!vport)
2963                 return -EINVAL;
2964
2965         vport->vf_info.link_state = link_state;
2966
2967         return 0;
2968 }
2969
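/* Read the vector0 interrupt sources and classify the event as a reset
 * (IMP/global), an MSI-X error, a mailbox (CMDQ RX) event or other; the
 * register value to be cleared later is returned through @clearval.
 */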
2970 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2971 {
2972         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2973
2974         /* fetch the events from their corresponding regs */
2975         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2976         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2977         msix_src_reg = hclge_read_dev(&hdev->hw,
2978                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2979
2980         /* Assumption: if by any chance reset and mailbox events are reported
2981          * together, then we will only process the reset event in this go and
2982          * will defer the processing of the mailbox events. Since we would not
2983          * have cleared the RX CMDQ event this time, we would receive another
2984          * interrupt from H/W just for the mailbox.
2985          *
2986          * check for vector0 reset event sources
2987          */
2988         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2989                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2990                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2991                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2992                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2993                 hdev->rst_stats.imp_rst_cnt++;
2994                 return HCLGE_VECTOR0_EVENT_RST;
2995         }
2996
2997         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2998                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2999                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3000                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3001                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3002                 hdev->rst_stats.global_rst_cnt++;
3003                 return HCLGE_VECTOR0_EVENT_RST;
3004         }
3005
3006         /* check for vector0 msix event source */
3007         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3008                 *clearval = msix_src_reg;
3009                 return HCLGE_VECTOR0_EVENT_ERR;
3010         }
3011
3012         /* check for vector0 mailbox(=CMDQ RX) event source */
3013         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3014                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3015                 *clearval = cmdq_src_reg;
3016                 return HCLGE_VECTOR0_EVENT_MBX;
3017         }
3018
3019         /* print other vector0 event source */
3020         dev_info(&hdev->pdev->dev,
3021                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3022                  cmdq_src_reg, msix_src_reg);
3023         *clearval = msix_src_reg;
3024
3025         return HCLGE_VECTOR0_EVENT_OTHER;
3026 }
3027
3028 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3029                                     u32 regclr)
3030 {
3031         switch (event_type) {
3032         case HCLGE_VECTOR0_EVENT_RST:
3033                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3034                 break;
3035         case HCLGE_VECTOR0_EVENT_MBX:
3036                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3037                 break;
3038         default:
3039                 break;
3040         }
3041 }
3042
3043 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3044 {
3045         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3046                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3047                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3048                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3049         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3050 }
3051
3052 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3053 {
3054         writel(enable ? 1 : 0, vector->addr);
3055 }
3056
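/* Misc (vector0) interrupt handler: the vector is disabled while the
 * event is classified and the matching service task is scheduled; the
 * cause is then cleared and the vector re-enabled when appropriate.
 */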
3057 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3058 {
3059         struct hclge_dev *hdev = data;
3060         u32 clearval = 0;
3061         u32 event_cause;
3062
3063         hclge_enable_vector(&hdev->misc_vector, false);
3064         event_cause = hclge_check_event_cause(hdev, &clearval);
3065
3066         /* vector 0 interrupt is shared with reset and mailbox source events. */
3067         switch (event_cause) {
3068         case HCLGE_VECTOR0_EVENT_ERR:
3069                 /* we do not know what type of reset is required now. This could
3070                  * only be decided after we fetch the type of errors which
3071                  * caused this event. Therefore, we will do the following for now:
3072                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3073                  *    have deferred the type of reset to be used.
3074                  * 2. Schedule the reset service task.
3075                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
3076                  *    will fetch the correct type of reset. This would be done
3077                  *    by first decoding the types of errors.
3078                  */
3079                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3080                 /* fall through */
3081         case HCLGE_VECTOR0_EVENT_RST:
3082                 hclge_reset_task_schedule(hdev);
3083                 break;
3084         case HCLGE_VECTOR0_EVENT_MBX:
3085                 /* If we are here, then either
3086                  * 1. we are not handling any mbx task and none is
3087                  *    scheduled either,
3088                  *                        OR
3089                  * 2. we could be handling an mbx task but nothing more is
3090                  *    scheduled.
3091                  * In both cases, we should schedule the mbx task as there are
3092                  * more mbx messages reported by this interrupt.
3093                  */
3094                 hclge_mbx_task_schedule(hdev);
3095                 break;
3096         default:
3097                 dev_warn(&hdev->pdev->dev,
3098                          "received unknown or unhandled event of vector0\n");
3099                 break;
3100         }
3101
3102         hclge_clear_event_cause(hdev, event_cause, clearval);
3103
3104         /* Enable the interrupt if it is not caused by reset. When clearval
3105          * equals 0, it means the interrupt status may have been cleared by
3106          * hardware before the driver read the status register; in this case
3107          * the vector0 interrupt should also be enabled.
3108          */
3109         if (!clearval ||
3110             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3111                 hclge_enable_vector(&hdev->misc_vector, true);
3112         }
3113
3114         return IRQ_HANDLED;
3115 }
3116
3117 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3118 {
3119         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3120                 dev_warn(&hdev->pdev->dev,
3121                          "vector(vector_id %d) has been freed.\n", vector_id);
3122                 return;
3123         }
3124
3125         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3126         hdev->num_msi_left += 1;
3127         hdev->num_msi_used -= 1;
3128 }
3129
3130 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3131 {
3132         struct hclge_misc_vector *vector = &hdev->misc_vector;
3133
3134         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3135
3136         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3137         hdev->vector_status[0] = 0;
3138
3139         hdev->num_msi_left -= 1;
3140         hdev->num_msi_used += 1;
3141 }
3142
3143 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3144                                       const cpumask_t *mask)
3145 {
3146         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3147                                               affinity_notify);
3148
3149         cpumask_copy(&hdev->affinity_mask, mask);
3150 }
3151
3152 static void hclge_irq_affinity_release(struct kref *ref)
3153 {
3154 }
3155
3156 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3157 {
3158         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3159                               &hdev->affinity_mask);
3160
3161         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3162         hdev->affinity_notify.release = hclge_irq_affinity_release;
3163         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3164                                   &hdev->affinity_notify);
3165 }
3166
3167 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3168 {
3169         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3170         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3171 }
3172
3173 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3174 {
3175         int ret;
3176
3177         hclge_get_misc_vector(hdev);
3178
3179         /* this would be explicitly freed in the end */
3180         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3181                           0, "hclge_misc", hdev);
3182         if (ret) {
3183                 hclge_free_vector(hdev, 0);
3184                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3185                         hdev->misc_vector.vector_irq);
3186         }
3187
3188         return ret;
3189 }
3190
3191 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3192 {
3193         free_irq(hdev->misc_vector.vector_irq, hdev);
3194         hclge_free_vector(hdev, 0);
3195 }
3196
3197 int hclge_notify_client(struct hclge_dev *hdev,
3198                         enum hnae3_reset_notify_type type)
3199 {
3200         struct hnae3_client *client = hdev->nic_client;
3201         u16 i;
3202
3203         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3204                 return 0;
3205
3206         if (!client->ops->reset_notify)
3207                 return -EOPNOTSUPP;
3208
3209         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3210                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3211                 int ret;
3212
3213                 ret = client->ops->reset_notify(handle, type);
3214                 if (ret) {
3215                         dev_err(&hdev->pdev->dev,
3216                                 "notify nic client failed %d(%d)\n", type, ret);
3217                         return ret;
3218                 }
3219         }
3220
3221         return 0;
3222 }
3223
3224 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3225                                     enum hnae3_reset_notify_type type)
3226 {
3227         struct hnae3_client *client = hdev->roce_client;
3228         int ret = 0;
3229         u16 i;
3230
3231         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3232                 return 0;
3233
3234         if (!client->ops->reset_notify)
3235                 return -EOPNOTSUPP;
3236
3237         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3238                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3239
3240                 ret = client->ops->reset_notify(handle, type);
3241                 if (ret) {
3242                         dev_err(&hdev->pdev->dev,
3243                                 "notify roce client failed %d(%d)\n",
3244                                 type, ret);
3245                         return ret;
3246                 }
3247         }
3248
3249         return ret;
3250 }
3251
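/* Wait for the requested reset to complete: poll the relevant status
 * register (or the FLR done flag) until the reset is reported as done,
 * giving up after HCLGE_RESET_WAIT_CNT iterations.
 */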
3252 static int hclge_reset_wait(struct hclge_dev *hdev)
3253 {
3254 #define HCLGE_RESET_WAIT_MS     100
3255 #define HCLGE_RESET_WAIT_CNT    200
3256         u32 val, reg, reg_bit;
3257         u32 cnt = 0;
3258
3259         switch (hdev->reset_type) {
3260         case HNAE3_IMP_RESET:
3261                 reg = HCLGE_GLOBAL_RESET_REG;
3262                 reg_bit = HCLGE_IMP_RESET_BIT;
3263                 break;
3264         case HNAE3_GLOBAL_RESET:
3265                 reg = HCLGE_GLOBAL_RESET_REG;
3266                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3267                 break;
3268         case HNAE3_FUNC_RESET:
3269                 reg = HCLGE_FUN_RST_ING;
3270                 reg_bit = HCLGE_FUN_RST_ING_B;
3271                 break;
3272         case HNAE3_FLR_RESET:
3273                 break;
3274         default:
3275                 dev_err(&hdev->pdev->dev,
3276                         "Wait for unsupported reset type: %d\n",
3277                         hdev->reset_type);
3278                 return -EINVAL;
3279         }
3280
3281         if (hdev->reset_type == HNAE3_FLR_RESET) {
3282                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3283                        cnt++ < HCLGE_RESET_WAIT_CNT)
3284                         msleep(HCLGE_RESET_WAIT_MS);
3285
3286                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3287                         dev_err(&hdev->pdev->dev,
3288                                 "flr wait timeout: %u\n", cnt);
3289                         return -EBUSY;
3290                 }
3291
3292                 return 0;
3293         }
3294
3295         val = hclge_read_dev(&hdev->hw, reg);
3296         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3297                 msleep(HCLGE_RESET_WAIT_MS);
3298                 val = hclge_read_dev(&hdev->hw, reg);
3299                 cnt++;
3300         }
3301
3302         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3303                 dev_warn(&hdev->pdev->dev,
3304                          "Wait for reset timeout: %d\n", hdev->reset_type);
3305                 return -EBUSY;
3306         }
3307
3308         return 0;
3309 }
3310
3311 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3312 {
3313         struct hclge_vf_rst_cmd *req;
3314         struct hclge_desc desc;
3315
3316         req = (struct hclge_vf_rst_cmd *)desc.data;
3317         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3318         req->dest_vfid = func_id;
3319
3320         if (reset)
3321                 req->vf_rst = 0x1;
3322
3323         return hclge_cmd_send(&hdev->hw, &desc, 1);
3324 }
3325
3326 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3327 {
3328         int i;
3329
3330         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3331                 struct hclge_vport *vport = &hdev->vport[i];
3332                 int ret;
3333
3334                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3335                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3336                 if (ret) {
3337                         dev_err(&hdev->pdev->dev,
3338                                 "set vf(%u) rst failed %d!\n",
3339                                 vport->vport_id, ret);
3340                         return ret;
3341                 }
3342
3343                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3344                         continue;
3345
3346                 /* Inform VF to process the reset.
3347                  * hclge_inform_reset_assert_to_vf may fail if VF
3348                  * driver is not loaded.
3349                  */
3350                 ret = hclge_inform_reset_assert_to_vf(vport);
3351                 if (ret)
3352                         dev_warn(&hdev->pdev->dev,
3353                                  "inform reset to vf(%u) failed %d!\n",
3354                                  vport->vport_id, ret);
3355         }
3356
3357         return 0;
3358 }
3359
3360 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3361 {
3362         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3363             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3364             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3365                 return;
3366
3367         hclge_mbx_handler(hdev);
3368
3369         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3370 }
3371
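/* Before asserting a PF/FLR reset, poll the firmware until all running
 * VFs report ready (or the retry limit is hit), handling any pending
 * mailbox work so VFs can bring their netdevs down in the meantime.
 */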
3372 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3373 {
3374         struct hclge_pf_rst_sync_cmd *req;
3375         struct hclge_desc desc;
3376         int cnt = 0;
3377         int ret;
3378
3379         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3380         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3381
3382         do {
3383                 /* VF needs to bring its netdev down via mbx during PF/FLR reset */
3384                 hclge_mailbox_service_task(hdev);
3385
3386                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3387                 /* for compatibility with old firmware, wait
3388                  * 100 ms for VF to stop IO
3389                  */
3390                 if (ret == -EOPNOTSUPP) {
3391                         msleep(HCLGE_RESET_SYNC_TIME);
3392                         return 0;
3393                 } else if (ret) {
3394                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3395                                 ret);
3396                         return ret;
3397                 } else if (req->all_vf_ready) {
3398                         return 0;
3399                 }
3400                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3401                 hclge_cmd_reuse_desc(&desc, true);
3402         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3403
3404         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3405         return -ETIME;
3406 }
3407
3408 void hclge_report_hw_error(struct hclge_dev *hdev,
3409                            enum hnae3_hw_error_type type)
3410 {
3411         struct hnae3_client *client = hdev->nic_client;
3412         u16 i;
3413
3414         if (!client || !client->ops->process_hw_error ||
3415             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3416                 return;
3417
3418         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3419                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3420 }
3421
3422 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3423 {
3424         u32 reg_val;
3425
3426         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3427         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3428                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3429                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3430                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3431         }
3432
3433         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3434                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3435                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3436                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3437         }
3438 }
3439
3440 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3441 {
3442         struct hclge_desc desc;
3443         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3444         int ret;
3445
3446         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3447         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3448         req->fun_reset_vfid = func_id;
3449
3450         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3451         if (ret)
3452                 dev_err(&hdev->pdev->dev,
3453                         "send function reset cmd fail, status =%d\n", ret);
3454
3455         return ret;
3456 }
3457
3458 static void hclge_do_reset(struct hclge_dev *hdev)
3459 {
3460         struct hnae3_handle *handle = &hdev->vport[0].nic;
3461         struct pci_dev *pdev = hdev->pdev;
3462         u32 val;
3463
3464         if (hclge_get_hw_reset_stat(handle)) {
3465                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3466                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3467                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3468                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3469                 return;
3470         }
3471
3472         switch (hdev->reset_type) {
3473         case HNAE3_GLOBAL_RESET:
3474                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3475                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3476                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3477                 dev_info(&pdev->dev, "Global Reset requested\n");
3478                 break;
3479         case HNAE3_FUNC_RESET:
3480                 dev_info(&pdev->dev, "PF Reset requested\n");
3481                 /* schedule again to check later */
3482                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3483                 hclge_reset_task_schedule(hdev);
3484                 break;
3485         case HNAE3_FLR_RESET:
3486                 dev_info(&pdev->dev, "FLR requested\n");
3487                 /* schedule again to check later */
3488                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3489                 hclge_reset_task_schedule(hdev);
3490                 break;
3491         default:
3492                 dev_warn(&pdev->dev,
3493                          "Unsupported reset type: %d\n", hdev->reset_type);
3494                 break;
3495         }
3496 }
3497
3498 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3499                                                    unsigned long *addr)
3500 {
3501         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3502         struct hclge_dev *hdev = ae_dev->priv;
3503
3504         /* first, resolve any unknown reset type to the known type(s) */
3505         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3506                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3507                                         HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3508                 /* we will intentionally ignore any errors from this function
3509                  * as we will end up in *some* reset request in any case
3510                  */
3511                 if (hclge_handle_hw_msix_error(hdev, addr))
3512                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3513                                  msix_sts_reg);
3514
3515                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3516                 /* We deferred the clearing of the error event which caused
3517                  * the interrupt since it was not possible to do that in
3518                  * interrupt context (and this is the reason we introduced the
3519                  * new UNKNOWN reset type). Now that the errors have been
3520                  * handled and cleared in hardware, we can safely enable
3521                  * interrupts. This is an exception to the norm.
3522                  */
3523                 hclge_enable_vector(&hdev->misc_vector, true);
3524         }
3525
3526         /* return the highest priority reset level amongst all */
3527         if (test_bit(HNAE3_IMP_RESET, addr)) {
3528                 rst_level = HNAE3_IMP_RESET;
3529                 clear_bit(HNAE3_IMP_RESET, addr);
3530                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3531                 clear_bit(HNAE3_FUNC_RESET, addr);
3532         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3533                 rst_level = HNAE3_GLOBAL_RESET;
3534                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3535                 clear_bit(HNAE3_FUNC_RESET, addr);
3536         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3537                 rst_level = HNAE3_FUNC_RESET;
3538                 clear_bit(HNAE3_FUNC_RESET, addr);
3539         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3540                 rst_level = HNAE3_FLR_RESET;
3541                 clear_bit(HNAE3_FLR_RESET, addr);
3542         }
3543
3544         if (hdev->reset_type != HNAE3_NONE_RESET &&
3545             rst_level < hdev->reset_type)
3546                 return HNAE3_NONE_RESET;
3547
3548         return rst_level;
3549 }
3550
3551 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3552 {
3553         u32 clearval = 0;
3554
3555         switch (hdev->reset_type) {
3556         case HNAE3_IMP_RESET:
3557                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3558                 break;
3559         case HNAE3_GLOBAL_RESET:
3560                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3561                 break;
3562         default:
3563                 break;
3564         }
3565
3566         if (!clearval)
3567                 return;
3568
3569         /* For revision 0x20, the reset interrupt source
3570          * can only be cleared after the hardware reset is done
3571          */
3572         if (hdev->pdev->revision == 0x20)
3573                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3574                                 clearval);
3575
3576         hclge_enable_vector(&hdev->misc_vector, true);
3577 }
3578
3579 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3580 {
3581         int ret = 0;
3582
3583         switch (hdev->reset_type) {
3584         case HNAE3_FUNC_RESET:
3585                 /* fall through */
3586         case HNAE3_FLR_RESET:
3587                 ret = hclge_set_all_vf_rst(hdev, true);
3588                 break;
3589         default:
3590                 break;
3591         }
3592
3593         return ret;
3594 }
3595
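/* Set or clear the software reset-ready bit used to handshake with the
 * firmware around a reset.
 */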
3596 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3597 {
3598         u32 reg_val;
3599
3600         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3601         if (enable)
3602                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3603         else
3604                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3605
3606         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3607 }
3608
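/* Per reset type, do the preparatory work (sync with VFs, assert the
 * function reset command, handle IMP errors), then tell the firmware via
 * the reset handshake that the driver is ready for the reset.
 */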
3609 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3610 {
3611         u32 reg_val;
3612         int ret = 0;
3613
3614         switch (hdev->reset_type) {
3615         case HNAE3_FUNC_RESET:
3616                 /* to confirm whether all running VFs are ready
3617                  * before requesting PF reset
3618                  */
3619                 ret = hclge_func_reset_sync_vf(hdev);
3620                 if (ret)
3621                         return ret;
3622
3623                 ret = hclge_func_reset_cmd(hdev, 0);
3624                 if (ret) {
3625                         dev_err(&hdev->pdev->dev,
3626                                 "asserting function reset fail %d!\n", ret);
3627                         return ret;
3628                 }
3629
3630                 /* After performing PF reset, it is not necessary to do the
3631                  * mailbox handling or send any command to firmware, because
3632                  * any mailbox handling or command to firmware is only valid
3633                  * after hclge_cmd_init is called.
3634                  */
3635                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3636                 hdev->rst_stats.pf_rst_cnt++;
3637                 break;
3638         case HNAE3_FLR_RESET:
3639                 /* to confirm whether all running VFs are ready
3640                  * before requesting PF reset
3641                  */
3642                 ret = hclge_func_reset_sync_vf(hdev);
3643                 if (ret)
3644                         return ret;
3645
3646                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3647                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3648                 hdev->rst_stats.flr_rst_cnt++;
3649                 break;
3650         case HNAE3_IMP_RESET:
3651                 hclge_handle_imp_error(hdev);
3652                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3653                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3654                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3655                 break;
3656         default:
3657                 break;
3658         }
3659
3660         /* inform hardware that preparatory work is done */
3661         msleep(HCLGE_RESET_SYNC_TIME);
3662         hclge_reset_handshake(hdev, true);
3663         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3664
3665         return ret;
3666 }
3667
3668 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3669 {
3670 #define MAX_RESET_FAIL_CNT 5
3671
3672         if (hdev->reset_pending) {
3673                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3674                          hdev->reset_pending);
3675                 return true;
3676         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3677                    HCLGE_RESET_INT_M) {
3678                 dev_info(&hdev->pdev->dev,
3679                          "reset failed because new reset interrupt\n");
3680                 hclge_clear_reset_cause(hdev);
3681                 return false;
3682         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3683                 hdev->rst_stats.reset_fail_cnt++;
3684                 set_bit(hdev->reset_type, &hdev->reset_pending);
3685                 dev_info(&hdev->pdev->dev,
3686                          "re-schedule reset task(%u)\n",
3687                          hdev->rst_stats.reset_fail_cnt);
3688                 return true;
3689         }
3690
3691         hclge_clear_reset_cause(hdev);
3692
3693         /* recover the handshake status when reset fail */
3694         hclge_reset_handshake(hdev, true);
3695
3696         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3697
3698         hclge_dbg_dump_rst_info(hdev);
3699
3700         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3701
3702         return false;
3703 }
3704
3705 static int hclge_set_rst_done(struct hclge_dev *hdev)
3706 {
3707         struct hclge_pf_rst_done_cmd *req;
3708         struct hclge_desc desc;
3709         int ret;
3710
3711         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3712         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3713         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3714
3715         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3716         /* To be compatible with the old firmware, which does not support
3717          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3718          * return success
3719          */
3720         if (ret == -EOPNOTSUPP) {
3721                 dev_warn(&hdev->pdev->dev,
3722                          "current firmware does not support command(0x%x)!\n",
3723                          HCLGE_OPC_PF_RST_DONE);
3724                 return 0;
3725         } else if (ret) {
3726                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3727                         ret);
3728         }
3729
3730         return ret;
3731 }
3732
3733 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3734 {
3735         int ret = 0;
3736
3737         switch (hdev->reset_type) {
3738         case HNAE3_FUNC_RESET:
3739                 /* fall through */
3740         case HNAE3_FLR_RESET:
3741                 ret = hclge_set_all_vf_rst(hdev, false);
3742                 break;
3743         case HNAE3_GLOBAL_RESET:
3744                 /* fall through */
3745         case HNAE3_IMP_RESET:
3746                 ret = hclge_set_rst_done(hdev);
3747                 break;
3748         default:
3749                 break;
3750         }
3751
3752         /* clear the handshake status after re-initialization is done */
3753         hclge_reset_handshake(hdev, false);
3754
3755         return ret;
3756 }
3757
3758 static int hclge_reset_stack(struct hclge_dev *hdev)
3759 {
3760         int ret;
3761
3762         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3763         if (ret)
3764                 return ret;
3765
3766         ret = hclge_reset_ae_dev(hdev->ae_dev);
3767         if (ret)
3768                 return ret;
3769
3770         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3771         if (ret)
3772                 return ret;
3773
3774         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3775 }
3776
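/* Main reset flow: bring the clients down, prepare for and wait on the
 * hardware reset, re-initialize the stack and ae device, then bring the
 * clients back up. On failure the error path decides whether the reset
 * should be rescheduled.
 */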
3777 static void hclge_reset(struct hclge_dev *hdev)
3778 {
3779         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3780         enum hnae3_reset_type reset_level;
3781         int ret;
3782
3783         /* Initialize ae_dev reset status as well, in case enet layer wants to
3784          * know if device is undergoing reset
3785          */
3786         ae_dev->reset_type = hdev->reset_type;
3787         hdev->rst_stats.reset_cnt++;
3788         /* perform reset of the stack & ae device for a client */
3789         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3790         if (ret)
3791                 goto err_reset;
3792
3793         ret = hclge_reset_prepare_down(hdev);
3794         if (ret)
3795                 goto err_reset;
3796
3797         rtnl_lock();
3798         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3799         if (ret)
3800                 goto err_reset_lock;
3801
3802         rtnl_unlock();
3803
3804         ret = hclge_reset_prepare_wait(hdev);
3805         if (ret)
3806                 goto err_reset;
3807
3808         if (hclge_reset_wait(hdev))
3809                 goto err_reset;
3810
3811         hdev->rst_stats.hw_reset_done_cnt++;
3812
3813         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3814         if (ret)
3815                 goto err_reset;
3816
3817         rtnl_lock();
3818
3819         ret = hclge_reset_stack(hdev);
3820         if (ret)
3821                 goto err_reset_lock;
3822
3823         hclge_clear_reset_cause(hdev);
3824
3825         ret = hclge_reset_prepare_up(hdev);
3826         if (ret)
3827                 goto err_reset_lock;
3828
3829         rtnl_unlock();
3830
3831         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3832         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3833          * times
3834          */
3835         if (ret &&
3836             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3837                 goto err_reset;
3838
3839         rtnl_lock();
3840
3841         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3842         if (ret)
3843                 goto err_reset_lock;
3844
3845         rtnl_unlock();
3846
3847         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3848         if (ret)
3849                 goto err_reset;
3850
3851         hdev->last_reset_time = jiffies;
3852         hdev->rst_stats.reset_fail_cnt = 0;
3853         hdev->rst_stats.reset_done_cnt++;
3854         ae_dev->reset_type = HNAE3_NONE_RESET;
3855         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3856
3857         /* if default_reset_request has a higher level reset request,
3858          * it should be handled as soon as possible, since some errors
3859          * need this kind of reset to be fixed.
3860          */
3861         reset_level = hclge_get_reset_level(ae_dev,
3862                                             &hdev->default_reset_request);
3863         if (reset_level != HNAE3_NONE_RESET)
3864                 set_bit(reset_level, &hdev->reset_request);
3865
3866         return;
3867
3868 err_reset_lock:
3869         rtnl_unlock();
3870 err_reset:
3871         if (hclge_reset_err_handle(hdev))
3872                 hclge_reset_task_schedule(hdev);
3873 }
3874
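/* Entry point for reset requests coming from the stack or from error
 * recovery; it throttles repeated requests, picks the reset level and
 * schedules the reset service task.
 */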
3875 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3876 {
3877         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3878         struct hclge_dev *hdev = ae_dev->priv;
3879
3880         /* We might end up getting called broadly because of the 2 cases below:
3881          * 1. A recoverable error was conveyed through APEI and the only way
3882          *    to bring back normalcy is to reset.
3883          * 2. A new reset request from the stack due to timeout
3884          *
3885          * For the first case, the error event might not have an ae handle
3886          * available. Check if this is a new reset request and we are not here
3887          * just because the last reset attempt did not succeed and the watchdog
3888          * hit us again. We will know this if the last reset request did not
3889          * occur very recently (watchdog timer = 5*HZ; check after a
3890          * sufficiently large time, say 4*5*HZ). In case of a new request we
3891          * reset the "reset level" to PF reset. And if it is a repeat request
3892          * of the most recent one, then we want to throttle the reset request.
3893          * Therefore, we will not allow it again before 3*HZ has elapsed.
3894          */
3895         if (!handle)
3896                 handle = &hdev->vport[0].nic;
3897
3898         if (time_before(jiffies, (hdev->last_reset_time +
3899                                   HCLGE_RESET_INTERVAL))) {
3900                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3901                 return;
3902         } else if (hdev->default_reset_request) {
3903                 hdev->reset_level =
3904                         hclge_get_reset_level(ae_dev,
3905                                               &hdev->default_reset_request);
3906         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3907                 hdev->reset_level = HNAE3_FUNC_RESET;
3908         }
3909
3910         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3911                  hdev->reset_level);
3912
3913         /* request reset & schedule reset task */
3914         set_bit(hdev->reset_level, &hdev->reset_request);
3915         hclge_reset_task_schedule(hdev);
3916
3917         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3918                 hdev->reset_level++;
3919 }
3920
3921 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3922                                         enum hnae3_reset_type rst_type)
3923 {
3924         struct hclge_dev *hdev = ae_dev->priv;
3925
3926         set_bit(rst_type, &hdev->default_reset_request);
3927 }
3928
3929 static void hclge_reset_timer(struct timer_list *t)
3930 {
3931         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3932
3933         /* if default_reset_request has no value, it means that this reset
3934          * request has already been handled, so just return here
3935          */
3936         if (!hdev->default_reset_request)
3937                 return;
3938
3939         dev_info(&hdev->pdev->dev,
3940                  "triggering reset in reset timer\n");
3941         hclge_reset_event(hdev->pdev, NULL);
3942 }
3943
3944 static void hclge_reset_subtask(struct hclge_dev *hdev)
3945 {
3946         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3947
3948         /* check if there is any ongoing reset in the hardware. This status can
3949          * be checked from reset_pending. If there is, then we need to wait for
3950          * the hardware to complete the reset.
3951          *    a. If we are able to figure out in reasonable time that the
3952          *       hardware has fully reset, then we can proceed with the driver
3953          *       and client reset.
3954          *    b. else, we can come back later to check this status, so
3955          *       reschedule now.
3956          */
3957         hdev->last_reset_time = jiffies;
3958         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3959         if (hdev->reset_type != HNAE3_NONE_RESET)
3960                 hclge_reset(hdev);
3961
3962         /* check if we got any *new* reset requests to be honored */
3963         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3964         if (hdev->reset_type != HNAE3_NONE_RESET)
3965                 hclge_do_reset(hdev);
3966
3967         hdev->reset_type = HNAE3_NONE_RESET;
3968 }
3969
3970 static void hclge_reset_service_task(struct hclge_dev *hdev)
3971 {
3972         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3973                 return;
3974
3975         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3976                 return;
3977
3978         hclge_reset_subtask(hdev);
3979
3980         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3981 }
3982
3983 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3984 {
3985         int i;
3986
3987         /* start from vport 1, since the PF is always alive */
3988         for (i = 1; i < hdev->num_alloc_vport; i++) {
3989                 struct hclge_vport *vport = &hdev->vport[i];
3990
3991                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3992                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3993
3994                 /* If vf is not alive, set to default value */
3995                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3996                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3997         }
3998 }
3999
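/* Periodic work: the link status is refreshed on every run, while vport
 * aliveness, port info, VLAN filters, statistics and aRFS expiry are
 * handled at most about once per second (some on longer intervals); the
 * task then reschedules itself.
 */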
4000 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4001 {
4002         unsigned long delta = round_jiffies_relative(HZ);
4003
4004         /* Always handle the link updating to make sure link state is
4005          * updated when it is triggered by mbx.
4006          */
4007         hclge_update_link_status(hdev);
4008
4009         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4010                 delta = jiffies - hdev->last_serv_processed;
4011
4012                 if (delta < round_jiffies_relative(HZ)) {
4013                         delta = round_jiffies_relative(HZ) - delta;
4014                         goto out;
4015                 }
4016         }
4017
4018         hdev->serv_processed_cnt++;
4019         hclge_update_vport_alive(hdev);
4020
4021         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4022                 hdev->last_serv_processed = jiffies;
4023                 goto out;
4024         }
4025
4026         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4027                 hclge_update_stats_for_all(hdev);
4028
4029         hclge_update_port_info(hdev);
4030         hclge_sync_vlan_filter(hdev);
4031
4032         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4033                 hclge_rfs_filter_expire(hdev);
4034
4035         hdev->last_serv_processed = jiffies;
4036
4037 out:
4038         hclge_task_schedule(hdev, delta);
4039 }
4040
4041 static void hclge_service_task(struct work_struct *work)
4042 {
4043         struct hclge_dev *hdev =
4044                 container_of(work, struct hclge_dev, service_task.work);
4045
4046         hclge_reset_service_task(hdev);
4047         hclge_mailbox_service_task(hdev);
4048         hclge_periodic_service_task(hdev);
4049
4050         /* Handle reset and mbx again in case the periodic task delays the
4051          * handling by calling hclge_task_schedule() in
4052          * hclge_periodic_service_task().
4053          */
4054         hclge_reset_service_task(hdev);
4055         hclge_mailbox_service_task(hdev);
4056 }
4057
4058 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4059 {
4060         /* VF handle has no client */
4061         if (!handle->client)
4062                 return container_of(handle, struct hclge_vport, nic);
4063         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4064                 return container_of(handle, struct hclge_vport, roce);
4065         else
4066                 return container_of(handle, struct hclge_vport, nic);
4067 }
4068
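/* Allocate up to @vector_num unused MSI-X vectors for the vport. For a
 * free entry i, the vector's doorbell address is computed as
 *   io_base + HCLGE_VECTOR_REG_BASE + (i - 1) * HCLGE_VECTOR_REG_OFFSET +
 *   vport_id * HCLGE_VECTOR_VF_OFFSET
 * Returns the number of vectors actually allocated.
 */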
4069 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4070                             struct hnae3_vector_info *vector_info)
4071 {
4072         struct hclge_vport *vport = hclge_get_vport(handle);
4073         struct hnae3_vector_info *vector = vector_info;
4074         struct hclge_dev *hdev = vport->back;
4075         int alloc = 0;
4076         int i, j;
4077
4078         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4079         vector_num = min(hdev->num_msi_left, vector_num);
4080
4081         for (j = 0; j < vector_num; j++) {
4082                 for (i = 1; i < hdev->num_msi; i++) {
4083                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4084                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4085                                 vector->io_addr = hdev->hw.io_base +
4086                                         HCLGE_VECTOR_REG_BASE +
4087                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4088                                         vport->vport_id *
4089                                         HCLGE_VECTOR_VF_OFFSET;
4090                                 hdev->vector_status[i] = vport->vport_id;
4091                                 hdev->vector_irq[i] = vector->vector;
4092
4093                                 vector++;
4094                                 alloc++;
4095
4096                                 break;
4097                         }
4098                 }
4099         }
4100         hdev->num_msi_left -= alloc;
4101         hdev->num_msi_used += alloc;
4102
4103         return alloc;
4104 }
4105
4106 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4107 {
4108         int i;
4109
4110         for (i = 0; i < hdev->num_msi; i++)
4111                 if (vector == hdev->vector_irq[i])
4112                         return i;
4113
4114         return -EINVAL;
4115 }
4116
4117 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4118 {
4119         struct hclge_vport *vport = hclge_get_vport(handle);
4120         struct hclge_dev *hdev = vport->back;
4121         int vector_id;
4122
4123         vector_id = hclge_get_vector_index(hdev, vector);
4124         if (vector_id < 0) {
4125                 dev_err(&hdev->pdev->dev,
4126                         "Get vector index fail. vector_id =%d\n", vector_id);
4127                 return vector_id;
4128         }
4129
4130         hclge_free_vector(hdev, vector_id);
4131
4132         return 0;
4133 }
4134
4135 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4136 {
4137         return HCLGE_RSS_KEY_SIZE;
4138 }
4139
4140 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4141 {
4142         return HCLGE_RSS_IND_TBL_SIZE;
4143 }
4144
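/* Program the RSS hash algorithm and hash key, splitting the key across
 * several command descriptors of HCLGE_RSS_HASH_KEY_NUM bytes each.
 */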
4145 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4146                                   const u8 hfunc, const u8 *key)
4147 {
4148         struct hclge_rss_config_cmd *req;
4149         unsigned int key_offset = 0;
4150         struct hclge_desc desc;
4151         int key_counts;
4152         int key_size;
4153         int ret;
4154
4155         key_counts = HCLGE_RSS_KEY_SIZE;
4156         req = (struct hclge_rss_config_cmd *)desc.data;
4157
4158         while (key_counts) {
4159                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4160                                            false);
4161
4162                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4163                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4164
4165                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4166                 memcpy(req->hash_key,
4167                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4168
4169                 key_counts -= key_size;
4170                 key_offset++;
4171                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4172                 if (ret) {
4173                         dev_err(&hdev->pdev->dev,
4174                                 "Configure RSS config fail, status = %d\n",
4175                                 ret);
4176                         return ret;
4177                 }
4178         }
4179         return 0;
4180 }
4181
4182 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4183 {
4184         struct hclge_rss_indirection_table_cmd *req;
4185         struct hclge_desc desc;
4186         int i, j;
4187         int ret;
4188
4189         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4190
4191         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4192                 hclge_cmd_setup_basic_desc
4193                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4194
4195                 req->start_table_index =
4196                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4197                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4198
4199                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4200                         req->rss_result[j] =
4201                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4202
4203                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4204                 if (ret) {
4205                         dev_err(&hdev->pdev->dev,
4206                                 "Configure rss indir table fail,status = %d\n",
4207                                 ret);
4208                         return ret;
4209                 }
4210         }
4211         return 0;
4212 }
4213
4214 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4215                                  u16 *tc_size, u16 *tc_offset)
4216 {
4217         struct hclge_rss_tc_mode_cmd *req;
4218         struct hclge_desc desc;
4219         int ret;
4220         int i;
4221
4222         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4223         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4224
4225         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4226                 u16 mode = 0;
4227
4228                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4229                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4230                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4231                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4232                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4233
4234                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4235         }
4236
4237         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4238         if (ret)
4239                 dev_err(&hdev->pdev->dev,
4240                         "Configure rss tc mode fail, status = %d\n", ret);
4241
4242         return ret;
4243 }
4244
4245 static void hclge_get_rss_type(struct hclge_vport *vport)
4246 {
4247         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4248             vport->rss_tuple_sets.ipv4_udp_en ||
4249             vport->rss_tuple_sets.ipv4_sctp_en ||
4250             vport->rss_tuple_sets.ipv6_tcp_en ||
4251             vport->rss_tuple_sets.ipv6_udp_en ||
4252             vport->rss_tuple_sets.ipv6_sctp_en)
4253                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4254         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4255                  vport->rss_tuple_sets.ipv6_fragment_en)
4256                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4257         else
4258                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4259 }
4260
4261 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4262 {
4263         struct hclge_rss_input_tuple_cmd *req;
4264         struct hclge_desc desc;
4265         int ret;
4266
4267         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4268
4269         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4270
4271         /* Get the tuple cfg from pf */
4272         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4273         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4274         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4275         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4276         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4277         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4278         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4279         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4280         hclge_get_rss_type(&hdev->vport[0]);
4281         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4282         if (ret)
4283                 dev_err(&hdev->pdev->dev,
4284                         "Configure rss input fail, status = %d\n", ret);
4285         return ret;
4286 }
4287
4288 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4289                          u8 *key, u8 *hfunc)
4290 {
4291         struct hclge_vport *vport = hclge_get_vport(handle);
4292         int i;
4293
4294         /* Get hash algorithm */
4295         if (hfunc) {
4296                 switch (vport->rss_algo) {
4297                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4298                         *hfunc = ETH_RSS_HASH_TOP;
4299                         break;
4300                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4301                         *hfunc = ETH_RSS_HASH_XOR;
4302                         break;
4303                 default:
4304                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4305                         break;
4306                 }
4307         }
4308
4309         /* Get the RSS Key required by the user */
4310         if (key)
4311                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4312
4313         /* Get indirect table */
4314         if (indir)
4315                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4316                         indir[i] =  vport->rss_indirection_tbl[i];
4317
4318         return 0;
4319 }
4320
4321 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4322                          const  u8 *key, const  u8 hfunc)
4323 {
4324         struct hclge_vport *vport = hclge_get_vport(handle);
4325         struct hclge_dev *hdev = vport->back;
4326         u8 hash_algo;
4327         int ret, i;
4328
4329         /* Set the RSS Hash Key if specified by the user */
4330         if (key) {
4331                 switch (hfunc) {
4332                 case ETH_RSS_HASH_TOP:
4333                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4334                         break;
4335                 case ETH_RSS_HASH_XOR:
4336                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4337                         break;
4338                 case ETH_RSS_HASH_NO_CHANGE:
4339                         hash_algo = vport->rss_algo;
4340                         break;
4341                 default:
4342                         return -EINVAL;
4343                 }
4344
4345                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4346                 if (ret)
4347                         return ret;
4348
4349                 /* Update the shadow RSS key with the user specified key */
4350                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4351                 vport->rss_algo = hash_algo;
4352         }
4353
4354         /* Update the shadow RSS table with user specified qids */
4355         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4356                 vport->rss_indirection_tbl[i] = indir[i];
4357
4358         /* Update the hardware */
4359         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4360 }
4361
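/* Translate the ethtool RXH_* hash flags in @nfc into the hardware tuple
 * enable bits; SCTP flows additionally set HCLGE_V_TAG_BIT.
 */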
4362 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4363 {
4364         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4365
4366         if (nfc->data & RXH_L4_B_2_3)
4367                 hash_sets |= HCLGE_D_PORT_BIT;
4368         else
4369                 hash_sets &= ~HCLGE_D_PORT_BIT;
4370
4371         if (nfc->data & RXH_IP_SRC)
4372                 hash_sets |= HCLGE_S_IP_BIT;
4373         else
4374                 hash_sets &= ~HCLGE_S_IP_BIT;
4375
4376         if (nfc->data & RXH_IP_DST)
4377                 hash_sets |= HCLGE_D_IP_BIT;
4378         else
4379                 hash_sets &= ~HCLGE_D_IP_BIT;
4380
4381         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4382                 hash_sets |= HCLGE_V_TAG_BIT;
4383
4384         return hash_sets;
4385 }
4386
4387 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4388                                struct ethtool_rxnfc *nfc)
4389 {
4390         struct hclge_vport *vport = hclge_get_vport(handle);
4391         struct hclge_dev *hdev = vport->back;
4392         struct hclge_rss_input_tuple_cmd *req;
4393         struct hclge_desc desc;
4394         u8 tuple_sets;
4395         int ret;
4396
4397         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4398                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4399                 return -EINVAL;
4400
4401         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4402         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4403
4404         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4405         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4406         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4407         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4408         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4409         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4410         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4411         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4412
4413         tuple_sets = hclge_get_rss_hash_bits(nfc);
4414         switch (nfc->flow_type) {
4415         case TCP_V4_FLOW:
4416                 req->ipv4_tcp_en = tuple_sets;
4417                 break;
4418         case TCP_V6_FLOW:
4419                 req->ipv6_tcp_en = tuple_sets;
4420                 break;
4421         case UDP_V4_FLOW:
4422                 req->ipv4_udp_en = tuple_sets;
4423                 break;
4424         case UDP_V6_FLOW:
4425                 req->ipv6_udp_en = tuple_sets;
4426                 break;
4427         case SCTP_V4_FLOW:
4428                 req->ipv4_sctp_en = tuple_sets;
4429                 break;
4430         case SCTP_V6_FLOW:
4431                 if ((nfc->data & RXH_L4_B_0_1) ||
4432                     (nfc->data & RXH_L4_B_2_3))
4433                         return -EINVAL;
4434
4435                 req->ipv6_sctp_en = tuple_sets;
4436                 break;
4437         case IPV4_FLOW:
4438                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4439                 break;
4440         case IPV6_FLOW:
4441                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4442                 break;
4443         default:
4444                 return -EINVAL;
4445         }
4446
4447         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4448         if (ret) {
4449                 dev_err(&hdev->pdev->dev,
4450                         "Set rss tuple fail, status = %d\n", ret);
4451                 return ret;
4452         }
4453
4454         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4455         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4456         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4457         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4458         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4459         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4460         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4461         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4462         hclge_get_rss_type(vport);
4463         return 0;
4464 }
4465
4466 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4467                                struct ethtool_rxnfc *nfc)
4468 {
4469         struct hclge_vport *vport = hclge_get_vport(handle);
4470         u8 tuple_sets;
4471
4472         nfc->data = 0;
4473
4474         switch (nfc->flow_type) {
4475         case TCP_V4_FLOW:
4476                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4477                 break;
4478         case UDP_V4_FLOW:
4479                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4480                 break;
4481         case TCP_V6_FLOW:
4482                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4483                 break;
4484         case UDP_V6_FLOW:
4485                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4486                 break;
4487         case SCTP_V4_FLOW:
4488                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4489                 break;
4490         case SCTP_V6_FLOW:
4491                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4492                 break;
4493         case IPV4_FLOW:
4494         case IPV6_FLOW:
4495                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4496                 break;
4497         default:
4498                 return -EINVAL;
4499         }
4500
4501         if (!tuple_sets)
4502                 return 0;
4503
4504         if (tuple_sets & HCLGE_D_PORT_BIT)
4505                 nfc->data |= RXH_L4_B_2_3;
4506         if (tuple_sets & HCLGE_S_PORT_BIT)
4507                 nfc->data |= RXH_L4_B_0_1;
4508         if (tuple_sets & HCLGE_D_IP_BIT)
4509                 nfc->data |= RXH_IP_DST;
4510         if (tuple_sets & HCLGE_S_IP_BIT)
4511                 nfc->data |= RXH_IP_SRC;
4512
4513         return 0;
4514 }
4515
4516 static int hclge_get_tc_size(struct hnae3_handle *handle)
4517 {
4518         struct hclge_vport *vport = hclge_get_vport(handle);
4519         struct hclge_dev *hdev = vport->back;
4520
4521         return hdev->rss_size_max;
4522 }
4523
4524 int hclge_rss_init_hw(struct hclge_dev *hdev)
4525 {
4526         struct hclge_vport *vport = hdev->vport;
4527         u8 *rss_indir = vport[0].rss_indirection_tbl;
4528         u16 rss_size = vport[0].alloc_rss_size;
4529         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4530         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4531         u8 *key = vport[0].rss_hash_key;
4532         u8 hfunc = vport[0].rss_algo;
4533         u16 tc_valid[HCLGE_MAX_TC_NUM];
4534         u16 roundup_size;
4535         unsigned int i;
4536         int ret;
4537
4538         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4539         if (ret)
4540                 return ret;
4541
4542         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4543         if (ret)
4544                 return ret;
4545
4546         ret = hclge_set_rss_input_tuple(hdev);
4547         if (ret)
4548                 return ret;
4549
4550         /* Each TC has the same queue size, and the tc_size set to hardware
4551          * is the log2 of the roundup power of two of rss_size; the actual
4552          * queue size is limited by the indirection table.
4553          */
4554         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4555                 dev_err(&hdev->pdev->dev,
4556                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4557                         rss_size);
4558                 return -EINVAL;
4559         }
4560
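        /* e.g. rss_size = 24: roundup_pow_of_two() gives 32, tc_size = ilog2(32) = 5 */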
4561         roundup_size = roundup_pow_of_two(rss_size);
4562         roundup_size = ilog2(roundup_size);
4563
4564         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4565                 tc_valid[i] = 0;
4566
4567                 if (!(hdev->hw_tc_map & BIT(i)))
4568                         continue;
4569
4570                 tc_valid[i] = 1;
4571                 tc_size[i] = roundup_size;
4572                 tc_offset[i] = rss_size * i;
4573         }
4574
4575         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4576 }
4577
4578 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4579 {
4580         struct hclge_vport *vport = hdev->vport;
4581         int i, j;
4582
4583         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4584                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4585                         vport[j].rss_indirection_tbl[i] =
4586                                 i % vport[j].alloc_rss_size;
4587         }
4588 }
4589
4590 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4591 {
4592         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4593         struct hclge_vport *vport = hdev->vport;
4594
4595         if (hdev->pdev->revision >= 0x21)
4596                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4597
4598         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4599                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4600                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4601                 vport[i].rss_tuple_sets.ipv4_udp_en =
4602                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4603                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4604                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4605                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4606                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4607                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4608                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4609                 vport[i].rss_tuple_sets.ipv6_udp_en =
4610                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4611                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4612                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4613                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4614                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4615
4616                 vport[i].rss_algo = rss_algo;
4617
4618                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4619                        HCLGE_RSS_KEY_SIZE);
4620         }
4621
4622         hclge_rss_indir_init_cfg(hdev);
4623 }
4624
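/* Map (en == true) or unmap (en == false) every ring in @ring_chain to the
 * given vector, issuing a command every HCLGE_VECTOR_ELEMENTS_PER_CMD rings
 * and once more for any remainder.
 */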
4625 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4626                                 int vector_id, bool en,
4627                                 struct hnae3_ring_chain_node *ring_chain)
4628 {
4629         struct hclge_dev *hdev = vport->back;
4630         struct hnae3_ring_chain_node *node;
4631         struct hclge_desc desc;
4632         struct hclge_ctrl_vector_chain_cmd *req =
4633                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4634         enum hclge_cmd_status status;
4635         enum hclge_opcode_type op;
4636         u16 tqp_type_and_id;
4637         int i;
4638
4639         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4640         hclge_cmd_setup_basic_desc(&desc, op, false);
4641         req->int_vector_id = vector_id;
4642
4643         i = 0;
4644         for (node = ring_chain; node; node = node->next) {
4645                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4646                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4647                                 HCLGE_INT_TYPE_S,
4648                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4649                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4650                                 HCLGE_TQP_ID_S, node->tqp_index);
4651                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4652                                 HCLGE_INT_GL_IDX_S,
4653                                 hnae3_get_field(node->int_gl_idx,
4654                                                 HNAE3_RING_GL_IDX_M,
4655                                                 HNAE3_RING_GL_IDX_S));
4656                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4657                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4658                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4659                         req->vfid = vport->vport_id;
4660
4661                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4662                         if (status) {
4663                                 dev_err(&hdev->pdev->dev,
4664                                         "Map TQP fail, status is %d.\n",
4665                                         status);
4666                                 return -EIO;
4667                         }
4668                         i = 0;
4669
4670                         hclge_cmd_setup_basic_desc(&desc, op, false);
4673                         req->int_vector_id = vector_id;
4674                 }
4675         }
4676
4677         if (i > 0) {
4678                 req->int_cause_num = i;
4679                 req->vfid = vport->vport_id;
4680                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4681                 if (status) {
4682                         dev_err(&hdev->pdev->dev,
4683                                 "Map TQP fail, status is %d.\n", status);
4684                         return -EIO;
4685                 }
4686         }
4687
4688         return 0;
4689 }
4690
4691 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4692                                     struct hnae3_ring_chain_node *ring_chain)
4693 {
4694         struct hclge_vport *vport = hclge_get_vport(handle);
4695         struct hclge_dev *hdev = vport->back;
4696         int vector_id;
4697
4698         vector_id = hclge_get_vector_index(hdev, vector);
4699         if (vector_id < 0) {
4700                 dev_err(&hdev->pdev->dev,
4701                         "Get vector index fail. vector_id =%d\n", vector_id);
4702                 return vector_id;
4703         }
4704
4705         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4706 }
4707
4708 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4709                                        struct hnae3_ring_chain_node *ring_chain)
4710 {
4711         struct hclge_vport *vport = hclge_get_vport(handle);
4712         struct hclge_dev *hdev = vport->back;
4713         int vector_id, ret;
4714
4715         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4716                 return 0;
4717
4718         vector_id = hclge_get_vector_index(hdev, vector);
4719         if (vector_id < 0) {
4720                 dev_err(&handle->pdev->dev,
4721                         "Get vector index fail. ret =%d\n", vector_id);
4722                 return vector_id;
4723         }
4724
4725         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4726         if (ret)
4727                 dev_err(&handle->pdev->dev,
4728                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4729                         vector_id, ret);
4730
4731         return ret;
4732 }
4733
4734 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4735                                       struct hclge_promisc_param *param)
4736 {
4737         struct hclge_promisc_cfg_cmd *req;
4738         struct hclge_desc desc;
4739         int ret;
4740
4741         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4742
4743         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4744         req->vf_id = param->vf_id;
4745
4746         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4747          * pdev revision(0x20); newer revisions support them. Setting these
4748          * two fields does not cause an error when the driver sends the
4749          * command to the firmware on revision(0x20).
4750          */
4751         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4752                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4753
4754         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4755         if (ret)
4756                 dev_err(&hdev->pdev->dev,
4757                         "Set promisc mode fail, status is %d.\n", ret);
4758
4759         return ret;
4760 }
4761
4762 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4763                                      bool en_uc, bool en_mc, bool en_bc,
4764                                      int vport_id)
4765 {
4766         if (!param)
4767                 return;
4768
4769         memset(param, 0, sizeof(struct hclge_promisc_param));
4770         if (en_uc)
4771                 param->enable = HCLGE_PROMISC_EN_UC;
4772         if (en_mc)
4773                 param->enable |= HCLGE_PROMISC_EN_MC;
4774         if (en_bc)
4775                 param->enable |= HCLGE_PROMISC_EN_BC;
4776         param->vf_id = vport_id;
4777 }
4778
4779 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4780                                  bool en_mc_pmc, bool en_bc_pmc)
4781 {
4782         struct hclge_dev *hdev = vport->back;
4783         struct hclge_promisc_param param;
4784
4785         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4786                                  vport->vport_id);
4787         return hclge_cmd_set_promisc_mode(hdev, &param);
4788 }
4789
4790 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4791                                   bool en_mc_pmc)
4792 {
4793         struct hclge_vport *vport = hclge_get_vport(handle);
4794         bool en_bc_pmc = true;
4795
4796         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4797          * is always bypassed. So broadcast promisc should be disabled until
4798          * the user enables promisc mode.
4799          */
4800         if (handle->pdev->revision == 0x20)
4801                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4802
4803         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4804                                             en_bc_pmc);
4805 }
4806
4807 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4808 {
4809         struct hclge_get_fd_mode_cmd *req;
4810         struct hclge_desc desc;
4811         int ret;
4812
4813         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4814
4815         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4816
4817         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4818         if (ret) {
4819                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4820                 return ret;
4821         }
4822
4823         *fd_mode = req->mode;
4824
4825         return ret;
4826 }
4827
4828 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4829                                    u32 *stage1_entry_num,
4830                                    u32 *stage2_entry_num,
4831                                    u16 *stage1_counter_num,
4832                                    u16 *stage2_counter_num)
4833 {
4834         struct hclge_get_fd_allocation_cmd *req;
4835         struct hclge_desc desc;
4836         int ret;
4837
4838         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4839
4840         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4841
4842         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4843         if (ret) {
4844                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4845                         ret);
4846                 return ret;
4847         }
4848
4849         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4850         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4851         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4852         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4853
4854         return ret;
4855 }
4856
4857 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4858 {
4859         struct hclge_set_fd_key_config_cmd *req;
4860         struct hclge_fd_key_cfg *stage;
4861         struct hclge_desc desc;
4862         int ret;
4863
4864         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4865
4866         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4867         stage = &hdev->fd_cfg.key_cfg[stage_num];
4868         req->stage = stage_num;
4869         req->key_select = stage->key_sel;
4870         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4871         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4872         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4873         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4874         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4875         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4876
4877         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4878         if (ret)
4879                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4880
4881         return ret;
4882 }
4883
4884 static int hclge_init_fd_config(struct hclge_dev *hdev)
4885 {
4886 #define LOW_2_WORDS             0x03
4887         struct hclge_fd_key_cfg *key_cfg;
4888         int ret;
4889
4890         if (!hnae3_dev_fd_supported(hdev))
4891                 return 0;
4892
4893         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4894         if (ret)
4895                 return ret;
4896
4897         switch (hdev->fd_cfg.fd_mode) {
4898         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4899                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4900                 break;
4901         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4902                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4903                 break;
4904         default:
4905                 dev_err(&hdev->pdev->dev,
4906                         "Unsupported flow director mode %u\n",
4907                         hdev->fd_cfg.fd_mode);
4908                 return -EOPNOTSUPP;
4909         }
4910
4911         hdev->fd_cfg.proto_support =
4912                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4913                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4914         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4915         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4916         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4917         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4918         key_cfg->outer_sipv6_word_en = 0;
4919         key_cfg->outer_dipv6_word_en = 0;
4920
4921         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4922                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4923                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4924                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4925
4926         /* If the max 400 bit key is used, tuples for ether type can be supported */
4927         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4928                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4929                 key_cfg->tuple_active |=
4930                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4931         }
4932
4933         /* roce_type is used to filter roce frames
4934          * dst_vport is used to specify the rule
4935          */
4936         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4937
4938         ret = hclge_get_fd_allocation(hdev,
4939                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4940                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4941                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4942                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4943         if (ret)
4944                 return ret;
4945
4946         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4947 }
4948
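/* Write one TCAM entry (either the X or the Y part, selected by @sel_x) for
 * the given stage and location; the key is split across three descriptors.
 */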
4949 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4950                                 int loc, u8 *key, bool is_add)
4951 {
4952         struct hclge_fd_tcam_config_1_cmd *req1;
4953         struct hclge_fd_tcam_config_2_cmd *req2;
4954         struct hclge_fd_tcam_config_3_cmd *req3;
4955         struct hclge_desc desc[3];
4956         int ret;
4957
4958         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4959         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4960         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4961         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4962         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4963
4964         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4965         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4966         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4967
4968         req1->stage = stage;
4969         req1->xy_sel = sel_x ? 1 : 0;
4970         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4971         req1->index = cpu_to_le32(loc);
4972         req1->entry_vld = sel_x ? is_add : 0;
4973
4974         if (key) {
4975                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4976                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4977                        sizeof(req2->tcam_data));
4978                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4979                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4980         }
4981
4982         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4983         if (ret)
4984                 dev_err(&hdev->pdev->dev,
4985                         "config tcam key fail, ret=%d\n",
4986                         ret);
4987
4988         return ret;
4989 }
4990
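/* Program the action data for a flow director entry: whether to drop the
 * packet or forward it to a specific queue, which counter to use, and the
 * rule id to write back to the RX BD.
 */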
4991 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4992                               struct hclge_fd_ad_data *action)
4993 {
4994         struct hclge_fd_ad_config_cmd *req;
4995         struct hclge_desc desc;
4996         u64 ad_data = 0;
4997         int ret;
4998
4999         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5000
5001         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5002         req->index = cpu_to_le32(loc);
5003         req->stage = stage;
5004
5005         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5006                       action->write_rule_id_to_bd);
5007         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5008                         action->rule_id);
5009         ad_data <<= 32;
5010         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5011         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5012                       action->forward_to_direct_queue);
5013         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5014                         action->queue_id);
5015         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5016         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5017                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5018         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5019         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5020                         action->counter_id);
5021
5022         req->ad_data = cpu_to_le64(ad_data);
5023         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5024         if (ret)
5025                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5026
5027         return ret;
5028 }
5029
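/* Convert one flow director tuple of @rule into the TCAM key_x/key_y format
 * via calc_x()/calc_y(). Returns true if the tuple occupies space in the key
 * (the caller then advances its key pointers), false otherwise.
 */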
5030 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5031                                    struct hclge_fd_rule *rule)
5032 {
5033         u16 tmp_x_s, tmp_y_s;
5034         u32 tmp_x_l, tmp_y_l;
5035         int i;
5036
5037         if (rule->unused_tuple & tuple_bit)
5038                 return true;
5039
5040         switch (tuple_bit) {
5041         case 0:
5042                 return false;
5043         case BIT(INNER_DST_MAC):
5044                 for (i = 0; i < ETH_ALEN; i++) {
5045                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5046                                rule->tuples_mask.dst_mac[i]);
5047                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5048                                rule->tuples_mask.dst_mac[i]);
5049                 }
5050
5051                 return true;
5052         case BIT(INNER_SRC_MAC):
5053                 for (i = 0; i < ETH_ALEN; i++) {
5054                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5055                                rule->tuples_mask.src_mac[i]);
5056                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5057                                rule->tuples_mask.src_mac[i]);
5058                 }
5059
5060                 return true;
5061         case BIT(INNER_VLAN_TAG_FST):
5062                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5063                        rule->tuples_mask.vlan_tag1);
5064                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5065                        rule->tuples_mask.vlan_tag1);
5066                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5067                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5068
5069                 return true;
5070         case BIT(INNER_ETH_TYPE):
5071                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5072                        rule->tuples_mask.ether_proto);
5073                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5074                        rule->tuples_mask.ether_proto);
5075                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5076                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5077
5078                 return true;
5079         case BIT(INNER_IP_TOS):
5080                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5081                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5082
5083                 return true;
5084         case BIT(INNER_IP_PROTO):
5085                 calc_x(*key_x, rule->tuples.ip_proto,
5086                        rule->tuples_mask.ip_proto);
5087                 calc_y(*key_y, rule->tuples.ip_proto,
5088                        rule->tuples_mask.ip_proto);
5089
5090                 return true;
5091         case BIT(INNER_SRC_IP):
5092                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5093                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5094                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5095                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5096                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5097                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5098
5099                 return true;
5100         case BIT(INNER_DST_IP):
5101                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5102                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5103                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5104                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5105                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5106                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5107
5108                 return true;
5109         case BIT(INNER_SRC_PORT):
5110                 calc_x(tmp_x_s, rule->tuples.src_port,
5111                        rule->tuples_mask.src_port);
5112                 calc_y(tmp_y_s, rule->tuples.src_port,
5113                        rule->tuples_mask.src_port);
5114                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5115                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5116
5117                 return true;
5118         case BIT(INNER_DST_PORT):
5119                 calc_x(tmp_x_s, rule->tuples.dst_port,
5120                        rule->tuples_mask.dst_port);
5121                 calc_y(tmp_y_s, rule->tuples.dst_port,
5122                        rule->tuples_mask.dst_port);
5123                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5124                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5125
5126                 return true;
5127         default:
5128                 return false;
5129         }
5130 }
5131
5132 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5133                                  u8 vf_id, u8 network_port_id)
5134 {
5135         u32 port_number = 0;
5136
5137         if (port_type == HOST_PORT) {
5138                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5139                                 pf_id);
5140                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5141                                 vf_id);
5142                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5143         } else {
5144                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5145                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5146                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5147         }
5148
5149         return port_number;
5150 }
5151
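/* Pack the active meta data fields (ROCE type and destination vport number)
 * into the meta data region of the key, starting from the most significant
 * bits; the final shift left-aligns the packed bits.
 */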
5152 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5153                                        __le32 *key_x, __le32 *key_y,
5154                                        struct hclge_fd_rule *rule)
5155 {
5156         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5157         u8 cur_pos = 0, tuple_size, shift_bits;
5158         unsigned int i;
5159
5160         for (i = 0; i < MAX_META_DATA; i++) {
5161                 tuple_size = meta_data_key_info[i].key_length;
5162                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5163
5164                 switch (tuple_bit) {
5165                 case BIT(ROCE_TYPE):
5166                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5167                         cur_pos += tuple_size;
5168                         break;
5169                 case BIT(DST_VPORT):
5170                         port_number = hclge_get_port_number(HOST_PORT, 0,
5171                                                             rule->vf_id, 0);
5172                         hnae3_set_field(meta_data,
5173                                         GENMASK(cur_pos + tuple_size, cur_pos),
5174                                         cur_pos, port_number);
5175                         cur_pos += tuple_size;
5176                         break;
5177                 default:
5178                         break;
5179                 }
5180         }
5181
5182         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5183         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5184         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5185
5186         *key_x = cpu_to_le32(tmp_x << shift_bits);
5187         *key_y = cpu_to_le32(tmp_y << shift_bits);
5188 }
5189
5190 /* A complete key consists of a meta data key and a tuple key.
5191  * The meta data key is stored in the MSB region, the tuple key is stored
5192  * in the LSB region, and unused bits are filled with 0.
5193  */
5194 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5195                             struct hclge_fd_rule *rule)
5196 {
5197         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5198         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5199         u8 *cur_key_x, *cur_key_y;
5200         unsigned int i;
5201         int ret, tuple_size;
5202         u8 meta_data_region;
5203
5204         memset(key_x, 0, sizeof(key_x));
5205         memset(key_y, 0, sizeof(key_y));
5206         cur_key_x = key_x;
5207         cur_key_y = key_y;
5208
5209         for (i = 0; i < MAX_TUPLE; i++) {
5210                 bool tuple_valid;
5211                 u32 check_tuple;
5212
5213                 tuple_size = tuple_key_info[i].key_length / 8;
5214                 check_tuple = key_cfg->tuple_active & BIT(i);
5215
5216                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5217                                                      cur_key_y, rule);
5218                 if (tuple_valid) {
5219                         cur_key_x += tuple_size;
5220                         cur_key_y += tuple_size;
5221                 }
5222         }
5223
5224         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5225                         MAX_META_DATA_LENGTH / 8;
5226
5227         hclge_fd_convert_meta_data(key_cfg,
5228                                    (__le32 *)(key_x + meta_data_region),
5229                                    (__le32 *)(key_y + meta_data_region),
5230                                    rule);
5231
5232         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5233                                    true);
5234         if (ret) {
5235                 dev_err(&hdev->pdev->dev,
5236                         "fd key_y config fail, loc=%u, ret=%d\n",
5237                         rule->location, ret);
5238                 return ret;
5239         }
5240
5241         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5242                                    true);
5243         if (ret)
5244                 dev_err(&hdev->pdev->dev,
5245                         "fd key_x config fail, loc=%u, ret=%d\n",
5246                         rule->location, ret);
5247         return ret;
5248 }
5249
5250 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5251                                struct hclge_fd_rule *rule)
5252 {
5253         struct hclge_fd_ad_data ad_data;
5254
5255         ad_data.ad_id = rule->location;
5256
5257         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5258                 ad_data.drop_packet = true;
5259                 ad_data.forward_to_direct_queue = false;
5260                 ad_data.queue_id = 0;
5261         } else {
5262                 ad_data.drop_packet = false;
5263                 ad_data.forward_to_direct_queue = true;
5264                 ad_data.queue_id = rule->queue_id;
5265         }
5266
5267         ad_data.use_counter = false;
5268         ad_data.counter_id = 0;
5269
5270         ad_data.use_next_stage = false;
5271         ad_data.next_input_key = 0;
5272
5273         ad_data.write_rule_id_to_bd = true;
5274         ad_data.rule_id = rule->location;
5275
5276         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5277 }
5278
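/* Check whether the ethtool flow spec can be handled by the flow director,
 * and record every tuple the spec leaves unspecified in @unused so it can be
 * masked out of the rule.
 */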
5279 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5280                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5281 {
5282         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5283         struct ethtool_usrip4_spec *usr_ip4_spec;
5284         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5285         struct ethtool_usrip6_spec *usr_ip6_spec;
5286         struct ethhdr *ether_spec;
5287
5288         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5289                 return -EINVAL;
5290
5291         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5292                 return -EOPNOTSUPP;
5293
5294         if ((fs->flow_type & FLOW_EXT) &&
5295             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5296                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5297                 return -EOPNOTSUPP;
5298         }
5299
5300         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5301         case SCTP_V4_FLOW:
5302         case TCP_V4_FLOW:
5303         case UDP_V4_FLOW:
5304                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5305                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5306
5307                 if (!tcp_ip4_spec->ip4src)
5308                         *unused |= BIT(INNER_SRC_IP);
5309
5310                 if (!tcp_ip4_spec->ip4dst)
5311                         *unused |= BIT(INNER_DST_IP);
5312
5313                 if (!tcp_ip4_spec->psrc)
5314                         *unused |= BIT(INNER_SRC_PORT);
5315
5316                 if (!tcp_ip4_spec->pdst)
5317                         *unused |= BIT(INNER_DST_PORT);
5318
5319                 if (!tcp_ip4_spec->tos)
5320                         *unused |= BIT(INNER_IP_TOS);
5321
5322                 break;
5323         case IP_USER_FLOW:
5324                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5325                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5326                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5327
5328                 if (!usr_ip4_spec->ip4src)
5329                         *unused |= BIT(INNER_SRC_IP);
5330
5331                 if (!usr_ip4_spec->ip4dst)
5332                         *unused |= BIT(INNER_DST_IP);
5333
5334                 if (!usr_ip4_spec->tos)
5335                         *unused |= BIT(INNER_IP_TOS);
5336
5337                 if (!usr_ip4_spec->proto)
5338                         *unused |= BIT(INNER_IP_PROTO);
5339
5340                 if (usr_ip4_spec->l4_4_bytes)
5341                         return -EOPNOTSUPP;
5342
5343                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5344                         return -EOPNOTSUPP;
5345
5346                 break;
5347         case SCTP_V6_FLOW:
5348         case TCP_V6_FLOW:
5349         case UDP_V6_FLOW:
5350                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5351                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5352                         BIT(INNER_IP_TOS);
5353
5354                 /* check whether src/dst ip address used */
5355                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5356                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5357                         *unused |= BIT(INNER_SRC_IP);
5358
5359                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5360                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5361                         *unused |= BIT(INNER_DST_IP);
5362
5363                 if (!tcp_ip6_spec->psrc)
5364                         *unused |= BIT(INNER_SRC_PORT);
5365
5366                 if (!tcp_ip6_spec->pdst)
5367                         *unused |= BIT(INNER_DST_PORT);
5368
5369                 if (tcp_ip6_spec->tclass)
5370                         return -EOPNOTSUPP;
5371
5372                 break;
5373         case IPV6_USER_FLOW:
5374                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5375                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5376                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5377                         BIT(INNER_DST_PORT);
5378
5379                 /* check whether src/dst ip address used */
5380                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5381                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5382                         *unused |= BIT(INNER_SRC_IP);
5383
5384                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5385                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5386                         *unused |= BIT(INNER_DST_IP);
5387
5388                 if (!usr_ip6_spec->l4_proto)
5389                         *unused |= BIT(INNER_IP_PROTO);
5390
5391                 if (usr_ip6_spec->tclass)
5392                         return -EOPNOTSUPP;
5393
5394                 if (usr_ip6_spec->l4_4_bytes)
5395                         return -EOPNOTSUPP;
5396
5397                 break;
5398         case ETHER_FLOW:
5399                 ether_spec = &fs->h_u.ether_spec;
5400                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5401                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5402                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5403
5404                 if (is_zero_ether_addr(ether_spec->h_source))
5405                         *unused |= BIT(INNER_SRC_MAC);
5406
5407                 if (is_zero_ether_addr(ether_spec->h_dest))
5408                         *unused |= BIT(INNER_DST_MAC);
5409
5410                 if (!ether_spec->h_proto)
5411                         *unused |= BIT(INNER_ETH_TYPE);
5412
5413                 break;
5414         default:
5415                 return -EOPNOTSUPP;
5416         }
5417
5418         if ((fs->flow_type & FLOW_EXT)) {
5419                 if (fs->h_ext.vlan_etype)
5420                         return -EOPNOTSUPP;
5421                 if (!fs->h_ext.vlan_tci)
5422                         *unused |= BIT(INNER_VLAN_TAG_FST);
5423
5424                 if (fs->m_ext.vlan_tci) {
5425                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5426                                 return -EINVAL;
5427                 }
5428         } else {
5429                 *unused |= BIT(INNER_VLAN_TAG_FST);
5430         }
5431
5432         if (fs->flow_type & FLOW_MAC_EXT) {
5433                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5434                         return -EOPNOTSUPP;
5435
5436                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5437                         *unused |= BIT(INNER_DST_MAC);
5438                 else
5439                         *unused &= ~(BIT(INNER_DST_MAC));
5440         }
5441
5442         return 0;
5443 }
5444
5445 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5446 {
5447         struct hclge_fd_rule *rule = NULL;
5448         struct hlist_node *node2;
5449
5450         spin_lock_bh(&hdev->fd_rule_lock);
5451         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5452                 if (rule->location >= location)
5453                         break;
5454         }
5455
5456         spin_unlock_bh(&hdev->fd_rule_lock);
5457
5458         return  rule && rule->location == location;
5459 }
5460
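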
5461 /* make sure this function is called while holding fd_rule_lock */
5462 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5463                                      struct hclge_fd_rule *new_rule,
5464                                      u16 location,
5465                                      bool is_add)
5466 {
5467         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5468         struct hlist_node *node2;
5469
5470         if (is_add && !new_rule)
5471                 return -EINVAL;
5472
5473         hlist_for_each_entry_safe(rule, node2,
5474                                   &hdev->fd_rule_list, rule_node) {
5475                 if (rule->location >= location)
5476                         break;
5477                 parent = rule;
5478         }
5479
5480         if (rule && rule->location == location) {
5481                 hlist_del(&rule->rule_node);
5482                 kfree(rule);
5483                 hdev->hclge_fd_rule_num--;
5484
5485                 if (!is_add) {
5486                         if (!hdev->hclge_fd_rule_num)
5487                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5488                         clear_bit(location, hdev->fd_bmap);
5489
5490                         return 0;
5491                 }
5492         } else if (!is_add) {
5493                 dev_err(&hdev->pdev->dev,
5494                         "delete fail, rule %u does not exist\n",
5495                         location);
5496                 return -EINVAL;
5497         }
5498
5499         INIT_HLIST_NODE(&new_rule->rule_node);
5500
5501         if (parent)
5502                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5503         else
5504                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5505
5506         set_bit(location, hdev->fd_bmap);
5507         hdev->hclge_fd_rule_num++;
5508         hdev->fd_active_type = new_rule->rule_type;
5509
5510         return 0;
5511 }
5512
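/* Fill the rule tuples and tuple masks from the ethtool flow spec; the L4
 * protocol is derived from the flow type for TCP/UDP/SCTP flows.
 */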
5513 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5514                               struct ethtool_rx_flow_spec *fs,
5515                               struct hclge_fd_rule *rule)
5516 {
5517         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5518
5519         switch (flow_type) {
5520         case SCTP_V4_FLOW:
5521         case TCP_V4_FLOW:
5522         case UDP_V4_FLOW:
5523                 rule->tuples.src_ip[IPV4_INDEX] =
5524                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5525                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5526                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5527
5528                 rule->tuples.dst_ip[IPV4_INDEX] =
5529                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5530                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5531                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5532
5533                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5534                 rule->tuples_mask.src_port =
5535                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5536
5537                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5538                 rule->tuples_mask.dst_port =
5539                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5540
5541                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5542                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5543
5544                 rule->tuples.ether_proto = ETH_P_IP;
5545                 rule->tuples_mask.ether_proto = 0xFFFF;
5546
5547                 break;
5548         case IP_USER_FLOW:
5549                 rule->tuples.src_ip[IPV4_INDEX] =
5550                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5551                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5552                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5553
5554                 rule->tuples.dst_ip[IPV4_INDEX] =
5555                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5556                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5557                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5558
5559                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5560                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5561
5562                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5563                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5564
5565                 rule->tuples.ether_proto = ETH_P_IP;
5566                 rule->tuples_mask.ether_proto = 0xFFFF;
5567
5568                 break;
5569         case SCTP_V6_FLOW:
5570         case TCP_V6_FLOW:
5571         case UDP_V6_FLOW:
5572                 be32_to_cpu_array(rule->tuples.src_ip,
5573                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5574                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5575                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5576
5577                 be32_to_cpu_array(rule->tuples.dst_ip,
5578                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5579                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5580                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5581
5582                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5583                 rule->tuples_mask.src_port =
5584                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5585
5586                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5587                 rule->tuples_mask.dst_port =
5588                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5589
5590                 rule->tuples.ether_proto = ETH_P_IPV6;
5591                 rule->tuples_mask.ether_proto = 0xFFFF;
5592
5593                 break;
5594         case IPV6_USER_FLOW:
5595                 be32_to_cpu_array(rule->tuples.src_ip,
5596                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5597                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5598                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5599
5600                 be32_to_cpu_array(rule->tuples.dst_ip,
5601                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5602                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5603                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5604
5605                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5606                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5607
5608                 rule->tuples.ether_proto = ETH_P_IPV6;
5609                 rule->tuples_mask.ether_proto = 0xFFFF;
5610
5611                 break;
5612         case ETHER_FLOW:
5613                 ether_addr_copy(rule->tuples.src_mac,
5614                                 fs->h_u.ether_spec.h_source);
5615                 ether_addr_copy(rule->tuples_mask.src_mac,
5616                                 fs->m_u.ether_spec.h_source);
5617
5618                 ether_addr_copy(rule->tuples.dst_mac,
5619                                 fs->h_u.ether_spec.h_dest);
5620                 ether_addr_copy(rule->tuples_mask.dst_mac,
5621                                 fs->m_u.ether_spec.h_dest);
5622
5623                 rule->tuples.ether_proto =
5624                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5625                 rule->tuples_mask.ether_proto =
5626                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5627
5628                 break;
5629         default:
5630                 return -EOPNOTSUPP;
5631         }
5632
5633         switch (flow_type) {
5634         case SCTP_V4_FLOW:
5635         case SCTP_V6_FLOW:
5636                 rule->tuples.ip_proto = IPPROTO_SCTP;
5637                 rule->tuples_mask.ip_proto = 0xFF;
5638                 break;
5639         case TCP_V4_FLOW:
5640         case TCP_V6_FLOW:
5641                 rule->tuples.ip_proto = IPPROTO_TCP;
5642                 rule->tuples_mask.ip_proto = 0xFF;
5643                 break;
5644         case UDP_V4_FLOW:
5645         case UDP_V6_FLOW:
5646                 rule->tuples.ip_proto = IPPROTO_UDP;
5647                 rule->tuples_mask.ip_proto = 0xFF;
5648                 break;
5649         default:
5650                 break;
5651         }
5652
5653         if ((fs->flow_type & FLOW_EXT)) {
5654                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5655                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5656         }
5657
5658         if (fs->flow_type & FLOW_MAC_EXT) {
5659                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5660                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5661         }
5662
5663         return 0;
5664 }
5665
5666 /* make sure the caller holds fd_rule_lock before calling this function */
5667 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5668                                 struct hclge_fd_rule *rule)
5669 {
5670         int ret;
5671
5672         if (!rule) {
5673                 dev_err(&hdev->pdev->dev,
5674                         "The flow director rule is NULL\n");
5675                 return -EINVAL;
5676         }
5677
5678         /* it will never fail here, so there is no need to check the return value */
5679         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5680
5681         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5682         if (ret)
5683                 goto clear_rule;
5684
5685         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5686         if (ret)
5687                 goto clear_rule;
5688
5689         return 0;
5690
5691 clear_rule:
5692         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5693         return ret;
5694 }
5695
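/* Add a flow director rule configured by the user through ethtool. Existing
 * aRFS rules are cleared first so that user-configured and aRFS rules never
 * coexist.
 */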
5696 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5697                               struct ethtool_rxnfc *cmd)
5698 {
5699         struct hclge_vport *vport = hclge_get_vport(handle);
5700         struct hclge_dev *hdev = vport->back;
5701         u16 dst_vport_id = 0, q_index = 0;
5702         struct ethtool_rx_flow_spec *fs;
5703         struct hclge_fd_rule *rule;
5704         u32 unused = 0;
5705         u8 action;
5706         int ret;
5707
5708         if (!hnae3_dev_fd_supported(hdev))
5709                 return -EOPNOTSUPP;
5710
5711         if (!hdev->fd_en) {
5712                 dev_warn(&hdev->pdev->dev,
5713                          "Please enable flow director first\n");
5714                 return -EOPNOTSUPP;
5715         }
5716
5717         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5718
5719         ret = hclge_fd_check_spec(hdev, fs, &unused);
5720         if (ret) {
5721                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5722                 return ret;
5723         }
5724
5725         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5726                 action = HCLGE_FD_ACTION_DROP_PACKET;
5727         } else {
5728                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5729                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5730                 u16 tqps;
5731
5732                 if (vf > hdev->num_req_vfs) {
5733                         dev_err(&hdev->pdev->dev,
5734                                 "Error: vf id (%u) > max vf num (%u)\n",
5735                                 vf, hdev->num_req_vfs);
5736                         return -EINVAL;
5737                 }
5738
5739                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5740                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5741
5742                 if (ring >= tqps) {
5743                         dev_err(&hdev->pdev->dev,
5744                                 "Error: queue id (%u) > max tqp num (%u)\n",
5745                                 ring, tqps - 1);
5746                         return -EINVAL;
5747                 }
5748
5749                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5750                 q_index = ring;
5751         }
5752
5753         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5754         if (!rule)
5755                 return -ENOMEM;
5756
5757         ret = hclge_fd_get_tuple(hdev, fs, rule);
5758         if (ret) {
5759                 kfree(rule);
5760                 return ret;
5761         }
5762
5763         rule->flow_type = fs->flow_type;
5764
5765         rule->location = fs->location;
5766         rule->unused_tuple = unused;
5767         rule->vf_id = dst_vport_id;
5768         rule->queue_id = q_index;
5769         rule->action = action;
5770         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5771
5772         /* when the user configures a rule via ethtool, clear all arfs
5773          * rules to avoid rule conflicts
5774          */
5775         hclge_clear_arfs_rules(handle);
5776
5777         spin_lock_bh(&hdev->fd_rule_lock);
5778         ret = hclge_fd_config_rule(hdev, rule);
5779
5780         spin_unlock_bh(&hdev->fd_rule_lock);
5781
5782         return ret;
5783 }
5784
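/* Delete the user-configured flow director rule at fs->location */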
5785 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5786                               struct ethtool_rxnfc *cmd)
5787 {
5788         struct hclge_vport *vport = hclge_get_vport(handle);
5789         struct hclge_dev *hdev = vport->back;
5790         struct ethtool_rx_flow_spec *fs;
5791         int ret;
5792
5793         if (!hnae3_dev_fd_supported(hdev))
5794                 return -EOPNOTSUPP;
5795
5796         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5797
5798         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5799                 return -EINVAL;
5800
5801         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5802                 dev_err(&hdev->pdev->dev,
5803                         "Delete fail, rule %u does not exist\n", fs->location);
5804                 return -ENOENT;
5805         }
5806
5807         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5808                                    NULL, false);
5809         if (ret)
5810                 return ret;
5811
5812         spin_lock_bh(&hdev->fd_rule_lock);
5813         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5814
5815         spin_unlock_bh(&hdev->fd_rule_lock);
5816
5817         return ret;
5818 }
5819
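/* Disable every flow director entry in hardware; when clear_list is true,
 * also release the software rule list and reset the rule bookkeeping.
 */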
5820 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5821                                      bool clear_list)
5822 {
5823         struct hclge_vport *vport = hclge_get_vport(handle);
5824         struct hclge_dev *hdev = vport->back;
5825         struct hclge_fd_rule *rule;
5826         struct hlist_node *node;
5827         u16 location;
5828
5829         if (!hnae3_dev_fd_supported(hdev))
5830                 return;
5831
5832         spin_lock_bh(&hdev->fd_rule_lock);
5833         for_each_set_bit(location, hdev->fd_bmap,
5834                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5835                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5836                                      NULL, false);
5837
5838         if (clear_list) {
5839                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5840                                           rule_node) {
5841                         hlist_del(&rule->rule_node);
5842                         kfree(rule);
5843                 }
5844                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5845                 hdev->hclge_fd_rule_num = 0;
5846                 bitmap_zero(hdev->fd_bmap,
5847                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5848         }
5849
5850         spin_unlock_bh(&hdev->fd_rule_lock);
5851 }
5852
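/* Re-program all rules from the software rule list into hardware, typically
 * after a reset. Rules that fail to restore are removed from the list.
 */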
5853 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5854 {
5855         struct hclge_vport *vport = hclge_get_vport(handle);
5856         struct hclge_dev *hdev = vport->back;
5857         struct hclge_fd_rule *rule;
5858         struct hlist_node *node;
5859         int ret;
5860
5861         /* Return ok here, because reset error handling will check this
5862          * return value. If error is returned here, the reset process will
5863          * fail.
5864          */
5865         if (!hnae3_dev_fd_supported(hdev))
5866                 return 0;
5867
5868         /* if fd is disabled, the rules should not be restored during reset */
5869         if (!hdev->fd_en)
5870                 return 0;
5871
5872         spin_lock_bh(&hdev->fd_rule_lock);
5873         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5874                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5875                 if (!ret)
5876                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5877
5878                 if (ret) {
5879                         dev_warn(&hdev->pdev->dev,
5880                                  "Restore rule %u failed, remove it\n",
5881                                  rule->location);
5882                         clear_bit(rule->location, hdev->fd_bmap);
5883                         hlist_del(&rule->rule_node);
5884                         kfree(rule);
5885                         hdev->hclge_fd_rule_num--;
5886                 }
5887         }
5888
5889         if (hdev->hclge_fd_rule_num)
5890                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5891
5892         spin_unlock_bh(&hdev->fd_rule_lock);
5893
5894         return 0;
5895 }
5896
5897 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5898                                  struct ethtool_rxnfc *cmd)
5899 {
5900         struct hclge_vport *vport = hclge_get_vport(handle);
5901         struct hclge_dev *hdev = vport->back;
5902
5903         if (!hnae3_dev_fd_supported(hdev))
5904                 return -EOPNOTSUPP;
5905
5906         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5907         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5908
5909         return 0;
5910 }
5911
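/* Fill an ethtool flow spec from the stored rule at fs->location so that
 * userspace can query a single rule.
 */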
5912 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5913                                   struct ethtool_rxnfc *cmd)
5914 {
5915         struct hclge_vport *vport = hclge_get_vport(handle);
5916         struct hclge_fd_rule *rule = NULL;
5917         struct hclge_dev *hdev = vport->back;
5918         struct ethtool_rx_flow_spec *fs;
5919         struct hlist_node *node2;
5920
5921         if (!hnae3_dev_fd_supported(hdev))
5922                 return -EOPNOTSUPP;
5923
5924         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5925
5926         spin_lock_bh(&hdev->fd_rule_lock);
5927
5928         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5929                 if (rule->location >= fs->location)
5930                         break;
5931         }
5932
5933         if (!rule || fs->location != rule->location) {
5934                 spin_unlock_bh(&hdev->fd_rule_lock);
5935
5936                 return -ENOENT;
5937         }
5938
5939         fs->flow_type = rule->flow_type;
5940         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5941         case SCTP_V4_FLOW:
5942         case TCP_V4_FLOW:
5943         case UDP_V4_FLOW:
5944                 fs->h_u.tcp_ip4_spec.ip4src =
5945                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5946                 fs->m_u.tcp_ip4_spec.ip4src =
5947                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5948                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5949
5950                 fs->h_u.tcp_ip4_spec.ip4dst =
5951                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5952                 fs->m_u.tcp_ip4_spec.ip4dst =
5953                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5954                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5955
5956                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5957                 fs->m_u.tcp_ip4_spec.psrc =
5958                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5959                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5960
5961                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5962                 fs->m_u.tcp_ip4_spec.pdst =
5963                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5964                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5965
5966                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5967                 fs->m_u.tcp_ip4_spec.tos =
5968                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5969                                 0 : rule->tuples_mask.ip_tos;
5970
5971                 break;
5972         case IP_USER_FLOW:
5973                 fs->h_u.usr_ip4_spec.ip4src =
5974                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5975                 fs->m_u.usr_ip4_spec.ip4src =
5976                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5977                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5978
5979                 fs->h_u.usr_ip4_spec.ip4dst =
5980                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5981                 fs->m_u.usr_ip4_spec.ip4dst =
5982                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5983                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5984
5985                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5986                 fs->m_u.usr_ip4_spec.tos =
5987                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5988                                 0 : rule->tuples_mask.ip_tos;
5989
5990                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5991                 fs->m_u.usr_ip4_spec.proto =
5992                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5993                                 0 : rule->tuples_mask.ip_proto;
5994
5995                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5996
5997                 break;
5998         case SCTP_V6_FLOW:
5999         case TCP_V6_FLOW:
6000         case UDP_V6_FLOW:
6001                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
6002                                   rule->tuples.src_ip, IPV6_SIZE);
6003                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6004                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
6005                                sizeof(int) * IPV6_SIZE);
6006                 else
6007                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
6008                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6009
6010                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6011                                   rule->tuples.dst_ip, IPV6_SIZE);
6012                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6013                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6014                                sizeof(int) * IPV6_SIZE);
6015                 else
6016                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6017                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6018
6019                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6020                 fs->m_u.tcp_ip6_spec.psrc =
6021                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6022                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
6023
6024                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6025                 fs->m_u.tcp_ip6_spec.pdst =
6026                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6027                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6028
6029                 break;
6030         case IPV6_USER_FLOW:
6031                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6032                                   rule->tuples.src_ip, IPV6_SIZE);
6033                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6034                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6035                                sizeof(int) * IPV6_SIZE);
6036                 else
6037                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6038                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6039
6040                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6041                                   rule->tuples.dst_ip, IPV6_SIZE);
6042                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6043                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6044                                sizeof(int) * IPV6_SIZE);
6045                 else
6046                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6047                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6048
6049                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6050                 fs->m_u.usr_ip6_spec.l4_proto =
6051                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6052                                 0 : rule->tuples_mask.ip_proto;
6053
6054                 break;
6055         case ETHER_FLOW:
6056                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6057                                 rule->tuples.src_mac);
6058                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6059                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6060                 else
6061                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6062                                         rule->tuples_mask.src_mac);
6063
6064                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6065                                 rule->tuples.dst_mac);
6066                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6067                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6068                 else
6069                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6070                                         rule->tuples_mask.dst_mac);
6071
6072                 fs->h_u.ether_spec.h_proto =
6073                                 cpu_to_be16(rule->tuples.ether_proto);
6074                 fs->m_u.ether_spec.h_proto =
6075                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6076                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6077
6078                 break;
6079         default:
6080                 spin_unlock_bh(&hdev->fd_rule_lock);
6081                 return -EOPNOTSUPP;
6082         }
6083
6084         if (fs->flow_type & FLOW_EXT) {
6085                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6086                 fs->m_ext.vlan_tci =
6087                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6088                                 cpu_to_be16(VLAN_VID_MASK) :
6089                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6090         }
6091
6092         if (fs->flow_type & FLOW_MAC_EXT) {
6093                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6094                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6095                         eth_zero_addr(fs->m_ext.h_dest);
6096                 else
6097                         ether_addr_copy(fs->m_ext.h_dest,
6098                                         rule->tuples_mask.dst_mac);
6099         }
6100
6101         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6102                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6103         } else {
6104                 u64 vf_id;
6105
6106                 fs->ring_cookie = rule->queue_id;
6107                 vf_id = rule->vf_id;
6108                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6109                 fs->ring_cookie |= vf_id;
6110         }
6111
6112         spin_unlock_bh(&hdev->fd_rule_lock);
6113
6114         return 0;
6115 }
6116
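/* Report the locations of all configured flow director rules to userspace */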
6117 static int hclge_get_all_rules(struct hnae3_handle *handle,
6118                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6119 {
6120         struct hclge_vport *vport = hclge_get_vport(handle);
6121         struct hclge_dev *hdev = vport->back;
6122         struct hclge_fd_rule *rule;
6123         struct hlist_node *node2;
6124         int cnt = 0;
6125
6126         if (!hnae3_dev_fd_supported(hdev))
6127                 return -EOPNOTSUPP;
6128
6129         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6130
6131         spin_lock_bh(&hdev->fd_rule_lock);
6132         hlist_for_each_entry_safe(rule, node2,
6133                                   &hdev->fd_rule_list, rule_node) {
6134                 if (cnt == cmd->rule_cnt) {
6135                         spin_unlock_bh(&hdev->fd_rule_lock);
6136                         return -EMSGSIZE;
6137                 }
6138
6139                 rule_locs[cnt] = rule->location;
6140                 cnt++;
6141         }
6142
6143         spin_unlock_bh(&hdev->fd_rule_lock);
6144
6145         cmd->rule_cnt = cnt;
6146
6147         return 0;
6148 }
6149
6150 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6151                                      struct hclge_fd_rule_tuples *tuples)
6152 {
6153         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6154         tuples->ip_proto = fkeys->basic.ip_proto;
6155         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6156
6157         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6158                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6159                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6160         } else {
6161                 memcpy(tuples->src_ip,
6162                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6163                        sizeof(tuples->src_ip));
6164                 memcpy(tuples->dst_ip,
6165                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6166                        sizeof(tuples->dst_ip));
6167         }
6168 }
6169
6170 /* traverse all rules, check whether an existing rule has the same tuples */
6171 static struct hclge_fd_rule *
6172 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6173                           const struct hclge_fd_rule_tuples *tuples)
6174 {
6175         struct hclge_fd_rule *rule = NULL;
6176         struct hlist_node *node;
6177
6178         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6179                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6180                         return rule;
6181         }
6182
6183         return NULL;
6184 }
6185
6186 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6187                                      struct hclge_fd_rule *rule)
6188 {
6189         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6190                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6191                              BIT(INNER_SRC_PORT);
6192         rule->action = 0;
6193         rule->vf_id = 0;
6194         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6195         if (tuples->ether_proto == ETH_P_IP) {
6196                 if (tuples->ip_proto == IPPROTO_TCP)
6197                         rule->flow_type = TCP_V4_FLOW;
6198                 else
6199                         rule->flow_type = UDP_V4_FLOW;
6200         } else {
6201                 if (tuples->ip_proto == IPPROTO_TCP)
6202                         rule->flow_type = TCP_V6_FLOW;
6203                 else
6204                         rule->flow_type = UDP_V6_FLOW;
6205         }
6206         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6207         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6208 }
6209
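/* Add or update a flow director rule on behalf of aRFS. On success the rule
 * location is returned so that the stack can later check it for expiry.
 */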
6210 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6211                                       u16 flow_id, struct flow_keys *fkeys)
6212 {
6213         struct hclge_vport *vport = hclge_get_vport(handle);
6214         struct hclge_fd_rule_tuples new_tuples;
6215         struct hclge_dev *hdev = vport->back;
6216         struct hclge_fd_rule *rule;
6217         u16 tmp_queue_id;
6218         u16 bit_id;
6219         int ret;
6220
6221         if (!hnae3_dev_fd_supported(hdev))
6222                 return -EOPNOTSUPP;
6223
6224         memset(&new_tuples, 0, sizeof(new_tuples));
6225         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6226
6227         spin_lock_bh(&hdev->fd_rule_lock);
6228
6229         /* when there are already fd rules added by the user,
6230          * arfs should not work
6231          */
6232         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6233                 spin_unlock_bh(&hdev->fd_rule_lock);
6234
6235                 return -EOPNOTSUPP;
6236         }
6237
6238         /* check whether a flow director filter already exists for this flow;
6239          * if not, create a new filter for it;
6240          * if a filter exists with a different queue id, modify the filter;
6241          * if a filter exists with the same queue id, do nothing
6242          */
6243         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6244         if (!rule) {
6245                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6246                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6247                         spin_unlock_bh(&hdev->fd_rule_lock);
6248
6249                         return -ENOSPC;
6250                 }
6251
6252                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6253                 if (!rule) {
6254                         spin_unlock_bh(&hdev->fd_rule_lock);
6255
6256                         return -ENOMEM;
6257                 }
6258
6259                 set_bit(bit_id, hdev->fd_bmap);
6260                 rule->location = bit_id;
6261                 rule->flow_id = flow_id;
6262                 rule->queue_id = queue_id;
6263                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6264                 ret = hclge_fd_config_rule(hdev, rule);
6265
6266                 spin_unlock_bh(&hdev->fd_rule_lock);
6267
6268                 if (ret)
6269                         return ret;
6270
6271                 return rule->location;
6272         }
6273
6274         spin_unlock_bh(&hdev->fd_rule_lock);
6275
6276         if (rule->queue_id == queue_id)
6277                 return rule->location;
6278
6279         tmp_queue_id = rule->queue_id;
6280         rule->queue_id = queue_id;
6281         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6282         if (ret) {
6283                 rule->queue_id = tmp_queue_id;
6284                 return ret;
6285         }
6286
6287         return rule->location;
6288 }
6289
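/* Remove aRFS rules whose flows the stack reports as expired */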
6290 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6291 {
6292 #ifdef CONFIG_RFS_ACCEL
6293         struct hnae3_handle *handle = &hdev->vport[0].nic;
6294         struct hclge_fd_rule *rule;
6295         struct hlist_node *node;
6296         HLIST_HEAD(del_list);
6297
6298         spin_lock_bh(&hdev->fd_rule_lock);
6299         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6300                 spin_unlock_bh(&hdev->fd_rule_lock);
6301                 return;
6302         }
6303         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6304                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6305                                         rule->flow_id, rule->location)) {
6306                         hlist_del_init(&rule->rule_node);
6307                         hlist_add_head(&rule->rule_node, &del_list);
6308                         hdev->hclge_fd_rule_num--;
6309                         clear_bit(rule->location, hdev->fd_bmap);
6310                 }
6311         }
6312         spin_unlock_bh(&hdev->fd_rule_lock);
6313
6314         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6315                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6316                                      rule->location, NULL, false);
6317                 kfree(rule);
6318         }
6319 #endif
6320 }
6321
6322 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6323 {
6324 #ifdef CONFIG_RFS_ACCEL
6325         struct hclge_vport *vport = hclge_get_vport(handle);
6326         struct hclge_dev *hdev = vport->back;
6327
6328         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6329                 hclge_del_all_fd_entries(handle, true);
6330 #endif
6331 }
6332
6333 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6334 {
6335         struct hclge_vport *vport = hclge_get_vport(handle);
6336         struct hclge_dev *hdev = vport->back;
6337
6338         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6339                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6340 }
6341
6342 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6343 {
6344         struct hclge_vport *vport = hclge_get_vport(handle);
6345         struct hclge_dev *hdev = vport->back;
6346
6347         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6348 }
6349
6350 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6351 {
6352         struct hclge_vport *vport = hclge_get_vport(handle);
6353         struct hclge_dev *hdev = vport->back;
6354
6355         return hdev->rst_stats.hw_reset_done_cnt;
6356 }
6357
6358 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6359 {
6360         struct hclge_vport *vport = hclge_get_vport(handle);
6361         struct hclge_dev *hdev = vport->back;
6362         bool clear;
6363
6364         hdev->fd_en = enable;
6365         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6366         if (!enable)
6367                 hclge_del_all_fd_entries(handle, clear);
6368         else
6369                 hclge_restore_fd_entries(handle);
6370 }
6371
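/* Enable or disable MAC TX/RX along with padding, FCS and oversize/undersize
 * frame handling.
 */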
6372 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6373 {
6374         struct hclge_desc desc;
6375         struct hclge_config_mac_mode_cmd *req =
6376                 (struct hclge_config_mac_mode_cmd *)desc.data;
6377         u32 loop_en = 0;
6378         int ret;
6379
6380         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6381
6382         if (enable) {
6383                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6384                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6385                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6386                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6387                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6388                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6389                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6390                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6391                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6392                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6393         }
6394
6395         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6396
6397         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6398         if (ret)
6399                 dev_err(&hdev->pdev->dev,
6400                         "mac enable fail, ret =%d.\n", ret);
6401 }
6402
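/* Read the current MAC VLAN switch parameter of the given function and write
 * it back with the bits selected by param_mask updated.
 */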
6403 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6404                                      u8 switch_param, u8 param_mask)
6405 {
6406         struct hclge_mac_vlan_switch_cmd *req;
6407         struct hclge_desc desc;
6408         u32 func_id;
6409         int ret;
6410
6411         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6412         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6413
6414         /* read current config parameter */
6415         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6416                                    true);
6417         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6418         req->func_id = cpu_to_le32(func_id);
6419
6420         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6421         if (ret) {
6422                 dev_err(&hdev->pdev->dev,
6423                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6424                 return ret;
6425         }
6426
6427         /* modify and write new config parameter */
6428         hclge_cmd_reuse_desc(&desc, false);
6429         req->switch_param = (req->switch_param & param_mask) | switch_param;
6430         req->param_mask = param_mask;
6431
6432         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6433         if (ret)
6434                 dev_err(&hdev->pdev->dev,
6435                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6436         return ret;
6437 }
6438
6439 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6440                                        int link_ret)
6441 {
6442 #define HCLGE_PHY_LINK_STATUS_NUM  200
6443
6444         struct phy_device *phydev = hdev->hw.mac.phydev;
6445         int i = 0;
6446         int ret;
6447
6448         do {
6449                 ret = phy_read_status(phydev);
6450                 if (ret) {
6451                         dev_err(&hdev->pdev->dev,
6452                                 "phy update link status fail, ret = %d\n", ret);
6453                         return;
6454                 }
6455
6456                 if (phydev->link == link_ret)
6457                         break;
6458
6459                 msleep(HCLGE_LINK_STATUS_MS);
6460         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6461 }
6462
6463 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6464 {
6465 #define HCLGE_MAC_LINK_STATUS_NUM  100
6466
6467         int i = 0;
6468         int ret;
6469
6470         do {
6471                 ret = hclge_get_mac_link_status(hdev);
6472                 if (ret < 0)
6473                         return ret;
6474                 else if (ret == link_ret)
6475                         return 0;
6476
6477                 msleep(HCLGE_LINK_STATUS_MS);
6478         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6479         return -EBUSY;
6480 }
6481
6482 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6483                                           bool is_phy)
6484 {
6485 #define HCLGE_LINK_STATUS_DOWN 0
6486 #define HCLGE_LINK_STATUS_UP   1
6487
6488         int link_ret;
6489
6490         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6491
6492         if (is_phy)
6493                 hclge_phy_link_status_wait(hdev, link_ret);
6494
6495         return hclge_mac_link_status_wait(hdev, link_ret);
6496 }
6497
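/* Enable or disable MAC (app) loopback by read-modify-writing the MAC mode
 * configuration.
 */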
6498 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6499 {
6500         struct hclge_config_mac_mode_cmd *req;
6501         struct hclge_desc desc;
6502         u32 loop_en;
6503         int ret;
6504
6505         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6506         /* 1 Read out the MAC mode config at first */
6507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6509         if (ret) {
6510                 dev_err(&hdev->pdev->dev,
6511                         "mac loopback get fail, ret =%d.\n", ret);
6512                 return ret;
6513         }
6514
6515         /* 2 Then setup the loopback flag */
6516         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6517         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6518         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6519         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6520
6521         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6522
6523         /* 3 Config mac work mode with loopback flag
6524          * and its original configuration parameters
6525          */
6526         hclge_cmd_reuse_desc(&desc, false);
6527         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6528         if (ret)
6529                 dev_err(&hdev->pdev->dev,
6530                         "mac loopback set fail, ret =%d.\n", ret);
6531         return ret;
6532 }
6533
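/* Configure serial or parallel serdes loopback, then poll until the firmware
 * reports the operation as done.
 */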
6534 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6535                                      enum hnae3_loop loop_mode)
6536 {
6537 #define HCLGE_SERDES_RETRY_MS   10
6538 #define HCLGE_SERDES_RETRY_NUM  100
6539
6540         struct hclge_serdes_lb_cmd *req;
6541         struct hclge_desc desc;
6542         int ret, i = 0;
6543         u8 loop_mode_b;
6544
6545         req = (struct hclge_serdes_lb_cmd *)desc.data;
6546         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6547
6548         switch (loop_mode) {
6549         case HNAE3_LOOP_SERIAL_SERDES:
6550                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6551                 break;
6552         case HNAE3_LOOP_PARALLEL_SERDES:
6553                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6554                 break;
6555         default:
6556                 dev_err(&hdev->pdev->dev,
6557                         "unsupported serdes loopback mode %d\n", loop_mode);
6558                 return -ENOTSUPP;
6559         }
6560
6561         if (en) {
6562                 req->enable = loop_mode_b;
6563                 req->mask = loop_mode_b;
6564         } else {
6565                 req->mask = loop_mode_b;
6566         }
6567
6568         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6569         if (ret) {
6570                 dev_err(&hdev->pdev->dev,
6571                         "serdes loopback set fail, ret = %d\n", ret);
6572                 return ret;
6573         }
6574
6575         do {
6576                 msleep(HCLGE_SERDES_RETRY_MS);
6577                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6578                                            true);
6579                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6580                 if (ret) {
6581                         dev_err(&hdev->pdev->dev,
6582                                 "serdes loopback get, ret = %d\n", ret);
6583                         return ret;
6584                 }
6585         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6586                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6587
6588         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6589                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6590                 return -EBUSY;
6591         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6592                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6593                 return -EIO;
6594         }
6595         return ret;
6596 }
6597
6598 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6599                                      enum hnae3_loop loop_mode)
6600 {
6601         int ret;
6602
6603         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6604         if (ret)
6605                 return ret;
6606
6607         hclge_cfg_mac_mode(hdev, en);
6608
6609         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6610         if (ret)
6611                 dev_err(&hdev->pdev->dev,
6612                         "serdes loopback config mac mode timeout\n");
6613
6614         return ret;
6615 }
6616
6617 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6618                                      struct phy_device *phydev)
6619 {
6620         int ret;
6621
6622         if (!phydev->suspended) {
6623                 ret = phy_suspend(phydev);
6624                 if (ret)
6625                         return ret;
6626         }
6627
6628         ret = phy_resume(phydev);
6629         if (ret)
6630                 return ret;
6631
6632         return phy_loopback(phydev, true);
6633 }
6634
6635 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6636                                       struct phy_device *phydev)
6637 {
6638         int ret;
6639
6640         ret = phy_loopback(phydev, false);
6641         if (ret)
6642                 return ret;
6643
6644         return phy_suspend(phydev);
6645 }
6646
6647 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6648 {
6649         struct phy_device *phydev = hdev->hw.mac.phydev;
6650         int ret;
6651
6652         if (!phydev)
6653                 return -ENOTSUPP;
6654
6655         if (en)
6656                 ret = hclge_enable_phy_loopback(hdev, phydev);
6657         else
6658                 ret = hclge_disable_phy_loopback(hdev, phydev);
6659         if (ret) {
6660                 dev_err(&hdev->pdev->dev,
6661                         "set phy loopback fail, ret = %d\n", ret);
6662                 return ret;
6663         }
6664
6665         hclge_cfg_mac_mode(hdev, en);
6666
6667         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6668         if (ret)
6669                 dev_err(&hdev->pdev->dev,
6670                         "phy loopback config mac mode timeout\n");
6671
6672         return ret;
6673 }
6674
6675 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6676                             int stream_id, bool enable)
6677 {
6678         struct hclge_desc desc;
6679         struct hclge_cfg_com_tqp_queue_cmd *req =
6680                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6681         int ret;
6682
6683         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6684         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6685         req->stream_id = cpu_to_le16(stream_id);
6686         if (enable)
6687                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6688
6689         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6690         if (ret)
6691                 dev_err(&hdev->pdev->dev,
6692                         "Tqp enable fail, status =%d.\n", ret);
6693         return ret;
6694 }
6695
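/* Configure the requested loopback mode (app, serdes or phy) and enable or
 * disable all TQPs of the vport accordingly.
 */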
6696 static int hclge_set_loopback(struct hnae3_handle *handle,
6697                               enum hnae3_loop loop_mode, bool en)
6698 {
6699         struct hclge_vport *vport = hclge_get_vport(handle);
6700         struct hnae3_knic_private_info *kinfo;
6701         struct hclge_dev *hdev = vport->back;
6702         int i, ret;
6703
6704         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6705          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6706          * the same, the packets are looped back in the SSU. If SSU loopback
6707          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6708          */
6709         if (hdev->pdev->revision >= 0x21) {
6710                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6711
6712                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6713                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6714                 if (ret)
6715                         return ret;
6716         }
6717
6718         switch (loop_mode) {
6719         case HNAE3_LOOP_APP:
6720                 ret = hclge_set_app_loopback(hdev, en);
6721                 break;
6722         case HNAE3_LOOP_SERIAL_SERDES:
6723         case HNAE3_LOOP_PARALLEL_SERDES:
6724                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6725                 break;
6726         case HNAE3_LOOP_PHY:
6727                 ret = hclge_set_phy_loopback(hdev, en);
6728                 break;
6729         default:
6730                 ret = -ENOTSUPP;
6731                 dev_err(&hdev->pdev->dev,
6732                         "loop_mode %d is not supported\n", loop_mode);
6733                 break;
6734         }
6735
6736         if (ret)
6737                 return ret;
6738
6739         kinfo = &vport->nic.kinfo;
6740         for (i = 0; i < kinfo->num_tqps; i++) {
6741                 ret = hclge_tqp_enable(hdev, i, 0, en);
6742                 if (ret)
6743                         return ret;
6744         }
6745
6746         return 0;
6747 }
6748
6749 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6750 {
6751         int ret;
6752
6753         ret = hclge_set_app_loopback(hdev, false);
6754         if (ret)
6755                 return ret;
6756
6757         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6758         if (ret)
6759                 return ret;
6760
6761         return hclge_cfg_serdes_loopback(hdev, false,
6762                                          HNAE3_LOOP_PARALLEL_SERDES);
6763 }
6764
6765 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6766 {
6767         struct hclge_vport *vport = hclge_get_vport(handle);
6768         struct hnae3_knic_private_info *kinfo;
6769         struct hnae3_queue *queue;
6770         struct hclge_tqp *tqp;
6771         int i;
6772
6773         kinfo = &vport->nic.kinfo;
6774         for (i = 0; i < kinfo->num_tqps; i++) {
6775                 queue = handle->kinfo.tqp[i];
6776                 tqp = container_of(queue, struct hclge_tqp, q);
6777                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6778         }
6779 }
6780
6781 static void hclge_flush_link_update(struct hclge_dev *hdev)
6782 {
6783 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6784
6785         unsigned long last = hdev->serv_processed_cnt;
6786         int i = 0;
6787
6788         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6789                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6790                last == hdev->serv_processed_cnt)
6791                 usleep_range(1, 1);
6792 }
6793
6794 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6795 {
6796         struct hclge_vport *vport = hclge_get_vport(handle);
6797         struct hclge_dev *hdev = vport->back;
6798
6799         if (enable) {
6800                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6801         } else {
6802                 /* Set the DOWN flag here to disable link updating */
6803                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6804
6805                 /* flush memory to make sure DOWN is seen by service task */
6806                 smp_mb__before_atomic();
6807                 hclge_flush_link_update(hdev);
6808         }
6809 }
6810
6811 static int hclge_ae_start(struct hnae3_handle *handle)
6812 {
6813         struct hclge_vport *vport = hclge_get_vport(handle);
6814         struct hclge_dev *hdev = vport->back;
6815
6816         /* mac enable */
6817         hclge_cfg_mac_mode(hdev, true);
6818         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6819         hdev->hw.mac.link = 0;
6820
6821         /* reset tqp stats */
6822         hclge_reset_tqp_stats(handle);
6823
6824         hclge_mac_start_phy(hdev);
6825
6826         return 0;
6827 }
6828
6829 static void hclge_ae_stop(struct hnae3_handle *handle)
6830 {
6831         struct hclge_vport *vport = hclge_get_vport(handle);
6832         struct hclge_dev *hdev = vport->back;
6833         int i;
6834
6835         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6836
6837         hclge_clear_arfs_rules(handle);
6838
6839         /* If it is not PF reset, the firmware will disable the MAC,
6840          * so it only needs to stop phy here.
6841          */
6842         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6843             hdev->reset_type != HNAE3_FUNC_RESET) {
6844                 hclge_mac_stop_phy(hdev);
6845                 hclge_update_link_status(hdev);
6846                 return;
6847         }
6848
6849         for (i = 0; i < handle->kinfo.num_tqps; i++)
6850                 hclge_reset_tqp(handle, i);
6851
6852         hclge_config_mac_tnl_int(hdev, false);
6853
6854         /* Mac disable */
6855         hclge_cfg_mac_mode(hdev, false);
6856
6857         hclge_mac_stop_phy(hdev);
6858
6859         /* reset tqp stats */
6860         hclge_reset_tqp_stats(handle);
6861         hclge_update_link_status(hdev);
6862 }
6863
6864 int hclge_vport_start(struct hclge_vport *vport)
6865 {
6866         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6867         vport->last_active_jiffies = jiffies;
6868         return 0;
6869 }
6870
6871 void hclge_vport_stop(struct hclge_vport *vport)
6872 {
6873         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6874 }
6875
6876 static int hclge_client_start(struct hnae3_handle *handle)
6877 {
6878         struct hclge_vport *vport = hclge_get_vport(handle);
6879
6880         return hclge_vport_start(vport);
6881 }
6882
6883 static void hclge_client_stop(struct hnae3_handle *handle)
6884 {
6885         struct hclge_vport *vport = hclge_get_vport(handle);
6886
6887         hclge_vport_stop(vport);
6888 }
6889
6890 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6891                                          u16 cmdq_resp, u8  resp_code,
6892                                          enum hclge_mac_vlan_tbl_opcode op)
6893 {
6894         struct hclge_dev *hdev = vport->back;
6895
6896         if (cmdq_resp) {
6897                 dev_err(&hdev->pdev->dev,
6898                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6899                         cmdq_resp);
6900                 return -EIO;
6901         }
6902
6903         if (op == HCLGE_MAC_VLAN_ADD) {
6904                 if ((!resp_code) || (resp_code == 1)) {
6905                         return 0;
6906                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6907                         dev_err(&hdev->pdev->dev,
6908                                 "add mac addr failed for uc_overflow.\n");
6909                         return -ENOSPC;
6910                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6911                         dev_err(&hdev->pdev->dev,
6912                                 "add mac addr failed for mc_overflow.\n");
6913                         return -ENOSPC;
6914                 }
6915
6916                 dev_err(&hdev->pdev->dev,
6917                         "add mac addr failed for undefined, code=%u.\n",
6918                         resp_code);
6919                 return -EIO;
6920         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6921                 if (!resp_code) {
6922                         return 0;
6923                 } else if (resp_code == 1) {
6924                         dev_dbg(&hdev->pdev->dev,
6925                                 "remove mac addr failed for miss.\n");
6926                         return -ENOENT;
6927                 }
6928
6929                 dev_err(&hdev->pdev->dev,
6930                         "remove mac addr failed for undefined, code=%u.\n",
6931                         resp_code);
6932                 return -EIO;
6933         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6934                 if (!resp_code) {
6935                         return 0;
6936                 } else if (resp_code == 1) {
6937                         dev_dbg(&hdev->pdev->dev,
6938                                 "lookup mac addr failed for miss.\n");
6939                         return -ENOENT;
6940                 }
6941
6942                 dev_err(&hdev->pdev->dev,
6943                         "lookup mac addr failed for undefined, code=%u.\n",
6944                         resp_code);
6945                 return -EIO;
6946         }
6947
6948         dev_err(&hdev->pdev->dev,
6949                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6950
6951         return -EINVAL;
6952 }
6953
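/* Set or clear the bit for vfid in the VF bitmap spread across the second
 * and third MAC VLAN table descriptors.
 */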
6954 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6955 {
6956 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6957
6958         unsigned int word_num;
6959         unsigned int bit_num;
6960
6961         if (vfid > 255 || vfid < 0)
6962                 return -EIO;
6963
6964         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6965                 word_num = vfid / 32;
6966                 bit_num  = vfid % 32;
6967                 if (clr)
6968                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6969                 else
6970                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6971         } else {
6972                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6973                 bit_num  = vfid % 32;
6974                 if (clr)
6975                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6976                 else
6977                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6978         }
6979
6980         return 0;
6981 }
6982
6983 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6984 {
6985 #define HCLGE_DESC_NUMBER 3
6986 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6987         int i, j;
6988
6989         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6990                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6991                         if (desc[i].data[j])
6992                                 return false;
6993
6994         return true;
6995 }
6996
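/* Pack a MAC address into a MAC VLAN table entry, marking it as a multicast
 * entry when requested.
 */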
6997 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6998                                    const u8 *addr, bool is_mc)
6999 {
7000         const unsigned char *mac_addr = addr;
7001         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7002                        (mac_addr[0]) | (mac_addr[1] << 8);
7003         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7004
7005         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7006         if (is_mc) {
7007                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7008                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7009         }
7010
7011         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7012         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7013 }
7014
7015 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7016                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7017 {
7018         struct hclge_dev *hdev = vport->back;
7019         struct hclge_desc desc;
7020         u8 resp_code;
7021         u16 retval;
7022         int ret;
7023
7024         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7025
7026         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7027
7028         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7029         if (ret) {
7030                 dev_err(&hdev->pdev->dev,
7031                         "del mac addr failed for cmd_send, ret =%d.\n",
7032                         ret);
7033                 return ret;
7034         }
7035         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7036         retval = le16_to_cpu(desc.retval);
7037
7038         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7039                                              HCLGE_MAC_VLAN_REMOVE);
7040 }
7041
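     /* Look up @req in the hardware MAC/VLAN table. A multicast lookup
      * uses three chained descriptors (the VF bitmap spans all of them),
      * while a unicast lookup needs only one; the firmware response code
      * is translated by hclge_get_mac_vlan_cmd_status().
      */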
7042 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7043                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7044                                      struct hclge_desc *desc,
7045                                      bool is_mc)
7046 {
7047         struct hclge_dev *hdev = vport->back;
7048         u8 resp_code;
7049         u16 retval;
7050         int ret;
7051
7052         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7053         if (is_mc) {
7054                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7055                 memcpy(desc[0].data,
7056                        req,
7057                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7058                 hclge_cmd_setup_basic_desc(&desc[1],
7059                                            HCLGE_OPC_MAC_VLAN_ADD,
7060                                            true);
7061                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7062                 hclge_cmd_setup_basic_desc(&desc[2],
7063                                            HCLGE_OPC_MAC_VLAN_ADD,
7064                                            true);
7065                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7066         } else {
7067                 memcpy(desc[0].data,
7068                        req,
7069                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7070                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7071         }
7072         if (ret) {
7073                 dev_err(&hdev->pdev->dev,
7074                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7075                         ret);
7076                 return ret;
7077         }
7078         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7079         retval = le16_to_cpu(desc[0].retval);
7080
7081         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7082                                              HCLGE_MAC_VLAN_LKUP);
7083 }
7084
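     /* Write @req into the hardware MAC/VLAN table. A unicast add
      * (@mc_desc is NULL) uses a single descriptor; a multicast add
      * reuses the three lookup descriptors passed in, so the updated VF
      * bitmap is written back together with the entry.
      */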
7085 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7086                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7087                                   struct hclge_desc *mc_desc)
7088 {
7089         struct hclge_dev *hdev = vport->back;
7090         int cfg_status;
7091         u8 resp_code;
7092         u16 retval;
7093         int ret;
7094
7095         if (!mc_desc) {
7096                 struct hclge_desc desc;
7097
7098                 hclge_cmd_setup_basic_desc(&desc,
7099                                            HCLGE_OPC_MAC_VLAN_ADD,
7100                                            false);
7101                 memcpy(desc.data, req,
7102                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7103                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7104                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7105                 retval = le16_to_cpu(desc.retval);
7106
7107                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7108                                                            resp_code,
7109                                                            HCLGE_MAC_VLAN_ADD);
7110         } else {
7111                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7112                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7113                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7114                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7115                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7116                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7117                 memcpy(mc_desc[0].data, req,
7118                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7119                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7120                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7121                 retval = le16_to_cpu(mc_desc[0].retval);
7122
7123                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7124                                                            resp_code,
7125                                                            HCLGE_MAC_VLAN_ADD);
7126         }
7127
7128         if (ret) {
7129                 dev_err(&hdev->pdev->dev,
7130                         "add mac addr failed for cmd_send, ret =%d.\n",
7131                         ret);
7132                 return ret;
7133         }
7134
7135         return cfg_status;
7136 }
7137
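     /* Request hdev->wanted_umv_size unicast MAC (UMV) table entries from
      * firmware and record what was actually granted. The granted space is
      * split into equal private quotas, one for the PF and one per requested
      * VF, with one extra quota plus the remainder kept as a shared pool.
      */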
7138 static int hclge_init_umv_space(struct hclge_dev *hdev)
7139 {
7140         u16 allocated_size = 0;
7141         int ret;
7142
7143         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7144                                   true);
7145         if (ret)
7146                 return ret;
7147
7148         if (allocated_size < hdev->wanted_umv_size)
7149                 dev_warn(&hdev->pdev->dev,
7150                          "Alloc umv space failed, want %u, get %u\n",
7151                          hdev->wanted_umv_size, allocated_size);
7152
7153         mutex_init(&hdev->umv_mutex);
7154         hdev->max_umv_size = allocated_size;
7155         /* Divide max_umv_size by (hdev->num_req_vfs + 2), so that some
7156          * unicast mac vlan table entries are reserved as a pool shared
7157          * by the pf and its vfs.
7158          */
7159         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7160         hdev->share_umv_size = hdev->priv_umv_size +
7161                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7162
7163         return 0;
7164 }
7165
7166 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7167 {
7168         int ret;
7169
7170         if (hdev->max_umv_size > 0) {
7171                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7172                                           false);
7173                 if (ret)
7174                         return ret;
7175                 hdev->max_umv_size = 0;
7176         }
7177         mutex_destroy(&hdev->umv_mutex);
7178
7179         return 0;
7180 }
7181
7182 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7183                                u16 *allocated_size, bool is_alloc)
7184 {
7185         struct hclge_umv_spc_alc_cmd *req;
7186         struct hclge_desc desc;
7187         int ret;
7188
7189         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7190         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7191         if (!is_alloc)
7192                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7193
7194         req->space_size = cpu_to_le32(space_size);
7195
7196         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7197         if (ret) {
7198                 dev_err(&hdev->pdev->dev,
7199                         "%s umv space failed for cmd_send, ret =%d\n",
7200                         is_alloc ? "allocate" : "free", ret);
7201                 return ret;
7202         }
7203
7204         if (is_alloc && allocated_size)
7205                 *allocated_size = le32_to_cpu(desc.data[1]);
7206
7207         return 0;
7208 }
7209
7210 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7211 {
7212         struct hclge_vport *vport;
7213         int i;
7214
7215         for (i = 0; i < hdev->num_alloc_vport; i++) {
7216                 vport = &hdev->vport[i];
7217                 vport->used_umv_num = 0;
7218         }
7219
7220         mutex_lock(&hdev->umv_mutex);
7221         hdev->share_umv_size = hdev->priv_umv_size +
7222                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7223         mutex_unlock(&hdev->umv_mutex);
7224 }
7225
7226 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7227 {
7228         struct hclge_dev *hdev = vport->back;
7229         bool is_full;
7230
7231         mutex_lock(&hdev->umv_mutex);
7232         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7233                    hdev->share_umv_size == 0);
7234         mutex_unlock(&hdev->umv_mutex);
7235
7236         return is_full;
7237 }
7238
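     /* Account for a unicast entry being added to or freed from the UMV
      * space: entries beyond the vport's private quota are charged to, and
      * later returned to, the shared pool.
      */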
7239 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7240 {
7241         struct hclge_dev *hdev = vport->back;
7242
7243         mutex_lock(&hdev->umv_mutex);
7244         if (is_free) {
7245                 if (vport->used_umv_num > hdev->priv_umv_size)
7246                         hdev->share_umv_size++;
7247
7248                 if (vport->used_umv_num > 0)
7249                         vport->used_umv_num--;
7250         } else {
7251                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7252                     hdev->share_umv_size > 0)
7253                         hdev->share_umv_size--;
7254                 vport->used_umv_num++;
7255         }
7256         mutex_unlock(&hdev->umv_mutex);
7257 }
7258
7259 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7260                              const unsigned char *addr)
7261 {
7262         struct hclge_vport *vport = hclge_get_vport(handle);
7263
7264         return hclge_add_uc_addr_common(vport, addr);
7265 }
7266
7267 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7268                              const unsigned char *addr)
7269 {
7270         struct hclge_dev *hdev = vport->back;
7271         struct hclge_mac_vlan_tbl_entry_cmd req;
7272         struct hclge_desc desc;
7273         u16 egress_port = 0;
7274         int ret;
7275
7276         /* mac addr check */
7277         if (is_zero_ether_addr(addr) ||
7278             is_broadcast_ether_addr(addr) ||
7279             is_multicast_ether_addr(addr)) {
7280                 dev_err(&hdev->pdev->dev,
7281                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7282                          addr, is_zero_ether_addr(addr),
7283                          is_broadcast_ether_addr(addr),
7284                          is_multicast_ether_addr(addr));
7285                 return -EINVAL;
7286         }
7287
7288         memset(&req, 0, sizeof(req));
7289
7290         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7291                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7292
7293         req.egress_port = cpu_to_le16(egress_port);
7294
7295         hclge_prepare_mac_addr(&req, addr, false);
7296
7297         /* Look up the mac address in the mac_vlan table, and add
7298          * it if the entry does not exist. Duplicate unicast entries
7299          * are not allowed in the mac vlan table.
7300          */
7301         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7302         if (ret == -ENOENT) {
7303                 if (!hclge_is_umv_space_full(vport)) {
7304                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7305                         if (!ret)
7306                                 hclge_update_umv_space(vport, false);
7307                         return ret;
7308                 }
7309
7310                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7311                         hdev->priv_umv_size);
7312
7313                 return -ENOSPC;
7314         }
7315
7316         /* check if we just hit the duplicate */
7317         if (!ret) {
7318                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7319                          vport->vport_id, addr);
7320                 return 0;
7321         }
7322
7323         dev_err(&hdev->pdev->dev,
7324                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7325                 addr);
7326
7327         return ret;
7328 }
7329
7330 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7331                             const unsigned char *addr)
7332 {
7333         struct hclge_vport *vport = hclge_get_vport(handle);
7334
7335         return hclge_rm_uc_addr_common(vport, addr);
7336 }
7337
7338 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7339                             const unsigned char *addr)
7340 {
7341         struct hclge_dev *hdev = vport->back;
7342         struct hclge_mac_vlan_tbl_entry_cmd req;
7343         int ret;
7344
7345         /* mac addr check */
7346         if (is_zero_ether_addr(addr) ||
7347             is_broadcast_ether_addr(addr) ||
7348             is_multicast_ether_addr(addr)) {
7349                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7350                         addr);
7351                 return -EINVAL;
7352         }
7353
7354         memset(&req, 0, sizeof(req));
7355         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7356         hclge_prepare_mac_addr(&req, addr, false);
7357         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7358         if (!ret)
7359                 hclge_update_umv_space(vport, true);
7360
7361         return ret;
7362 }
7363
7364 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7365                              const unsigned char *addr)
7366 {
7367         struct hclge_vport *vport = hclge_get_vport(handle);
7368
7369         return hclge_add_mc_addr_common(vport, addr);
7370 }
7371
7372 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7373                              const unsigned char *addr)
7374 {
7375         struct hclge_dev *hdev = vport->back;
7376         struct hclge_mac_vlan_tbl_entry_cmd req;
7377         struct hclge_desc desc[3];
7378         int status;
7379
7380         /* mac addr check */
7381         if (!is_multicast_ether_addr(addr)) {
7382                 dev_err(&hdev->pdev->dev,
7383                         "Add mc mac err! invalid mac:%pM.\n",
7384                          addr);
7385                 return -EINVAL;
7386         }
7387         memset(&req, 0, sizeof(req));
7388         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7389         hclge_prepare_mac_addr(&req, addr, true);
7390         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7391         if (status) {
7392                 /* This mac addr does not exist, add a new entry for it */
7393                 memset(desc[0].data, 0, sizeof(desc[0].data));
7394                 memset(desc[1].data, 0, sizeof(desc[0].data));
7395                 memset(desc[2].data, 0, sizeof(desc[0].data));
7396         }
7397         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7398         if (status)
7399                 return status;
7400         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7401
7402         if (status == -ENOSPC)
7403                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7404
7405         return status;
7406 }
7407
7408 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7409                             const unsigned char *addr)
7410 {
7411         struct hclge_vport *vport = hclge_get_vport(handle);
7412
7413         return hclge_rm_mc_addr_common(vport, addr);
7414 }
7415
7416 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7417                             const unsigned char *addr)
7418 {
7419         struct hclge_dev *hdev = vport->back;
7420         struct hclge_mac_vlan_tbl_entry_cmd req;
7421         enum hclge_cmd_status status;
7422         struct hclge_desc desc[3];
7423
7424         /* mac addr check */
7425         if (!is_multicast_ether_addr(addr)) {
7426                 dev_dbg(&hdev->pdev->dev,
7427                         "Remove mc mac err! invalid mac:%pM.\n",
7428                          addr);
7429                 return -EINVAL;
7430         }
7431
7432         memset(&req, 0, sizeof(req));
7433         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7434         hclge_prepare_mac_addr(&req, addr, true);
7435         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7436         if (!status) {
7437                 /* This mac addr exists, remove this handle's VFID from it */
7438                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7439                 if (status)
7440                         return status;
7441
7442                 if (hclge_is_all_function_id_zero(desc))
7443                         /* All the vfids are zero, so delete this entry */
7444                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7445                 else
7446                         /* Not all the vfids are zero, just update the vfid */
7447                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7448
7449         } else {
7450                 /* This mac address may be in the mta table, but it cannot be
7451                  * deleted here because an mta entry represents an address
7452                  * range rather than a specific address. The delete action for
7453                  * all entries will take effect in update_mta_status, called by
7454                  * hns3_nic_set_rx_mode.
7455                  */
7456                 status = 0;
7457         }
7458
7459         return status;
7460 }
7461
7462 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7463                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7464 {
7465         struct hclge_vport_mac_addr_cfg *mac_cfg;
7466         struct list_head *list;
7467
7468         if (!vport->vport_id)
7469                 return;
7470
7471         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7472         if (!mac_cfg)
7473                 return;
7474
7475         mac_cfg->hd_tbl_status = true;
7476         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7477
7478         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7479                &vport->uc_mac_list : &vport->mc_mac_list;
7480
7481         list_add_tail(&mac_cfg->node, list);
7482 }
7483
7484 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7485                               bool is_write_tbl,
7486                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7487 {
7488         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7489         struct list_head *list;
7490         bool uc_flag, mc_flag;
7491
7492         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7493                &vport->uc_mac_list : &vport->mc_mac_list;
7494
7495         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7496         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7497
7498         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7499                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7500                         if (uc_flag && mac_cfg->hd_tbl_status)
7501                                 hclge_rm_uc_addr_common(vport, mac_addr);
7502
7503                         if (mc_flag && mac_cfg->hd_tbl_status)
7504                                 hclge_rm_mc_addr_common(vport, mac_addr);
7505
7506                         list_del(&mac_cfg->node);
7507                         kfree(mac_cfg);
7508                         break;
7509                 }
7510         }
7511 }
7512
7513 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7514                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7515 {
7516         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7517         struct list_head *list;
7518
7519         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7520                &vport->uc_mac_list : &vport->mc_mac_list;
7521
7522         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7523                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7524                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7525
7526                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7527                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7528
7529                 mac_cfg->hd_tbl_status = false;
7530                 if (is_del_list) {
7531                         list_del(&mac_cfg->node);
7532                         kfree(mac_cfg);
7533                 }
7534         }
7535 }
7536
7537 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7538 {
7539         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7540         struct hclge_vport *vport;
7541         int i;
7542
7543         for (i = 0; i < hdev->num_alloc_vport; i++) {
7544                 vport = &hdev->vport[i];
7545                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7546                         list_del(&mac->node);
7547                         kfree(mac);
7548                 }
7549
7550                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7551                         list_del(&mac->node);
7552                         kfree(mac);
7553                 }
7554         }
7555 }
7556
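     /* Translate the firmware completion status and response code of a MAC
      * ethertype add command into an errno: "already added" counts as
      * success, manager table overflow and key conflicts become -EIO.
      */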
7557 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7558                                               u16 cmdq_resp, u8 resp_code)
7559 {
7560 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7561 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7562 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7563 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7564
7565         int return_status;
7566
7567         if (cmdq_resp) {
7568                 dev_err(&hdev->pdev->dev,
7569                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7570                         cmdq_resp);
7571                 return -EIO;
7572         }
7573
7574         switch (resp_code) {
7575         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7576         case HCLGE_ETHERTYPE_ALREADY_ADD:
7577                 return_status = 0;
7578                 break;
7579         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7580                 dev_err(&hdev->pdev->dev,
7581                         "add mac ethertype failed for manager table overflow.\n");
7582                 return_status = -EIO;
7583                 break;
7584         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7585                 dev_err(&hdev->pdev->dev,
7586                         "add mac ethertype failed for key conflict.\n");
7587                 return_status = -EIO;
7588                 break;
7589         default:
7590                 dev_err(&hdev->pdev->dev,
7591                         "add mac ethertype failed for undefined, code=%u.\n",
7592                         resp_code);
7593                 return_status = -EIO;
7594         }
7595
7596         return return_status;
7597 }
7598
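     /* Return true if @mac_addr is already programmed in the hardware
      * MAC/VLAN table or already recorded for another VF; an all-zero
      * address is never treated as a duplicate.
      */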
7599 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7600                                      u8 *mac_addr)
7601 {
7602         struct hclge_mac_vlan_tbl_entry_cmd req;
7603         struct hclge_dev *hdev = vport->back;
7604         struct hclge_desc desc;
7605         u16 egress_port = 0;
7606         int i;
7607
7608         if (is_zero_ether_addr(mac_addr))
7609                 return false;
7610
7611         memset(&req, 0, sizeof(req));
7612         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7613                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7614         req.egress_port = cpu_to_le16(egress_port);
7615         hclge_prepare_mac_addr(&req, mac_addr, false);
7616
7617         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7618                 return true;
7619
7620         vf_idx += HCLGE_VF_VPORT_START_NUM;
7621         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7622                 if (i != vf_idx &&
7623                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7624                         return true;
7625
7626         return false;
7627 }
7628
7629 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7630                             u8 *mac_addr)
7631 {
7632         struct hclge_vport *vport = hclge_get_vport(handle);
7633         struct hclge_dev *hdev = vport->back;
7634
7635         vport = hclge_get_vf_vport(hdev, vf);
7636         if (!vport)
7637                 return -EINVAL;
7638
7639         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7640                 dev_info(&hdev->pdev->dev,
7641                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7642                          mac_addr);
7643                 return 0;
7644         }
7645
7646         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7647                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7648                         mac_addr);
7649                 return -EEXIST;
7650         }
7651
7652         ether_addr_copy(vport->vf_info.mac, mac_addr);
7653         dev_info(&hdev->pdev->dev,
7654                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7655                  vf, mac_addr);
7656
7657         return hclge_inform_reset_assert_to_vf(vport);
7658 }
7659
7660 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7661                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7662 {
7663         struct hclge_desc desc;
7664         u8 resp_code;
7665         u16 retval;
7666         int ret;
7667
7668         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7669         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7670
7671         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7672         if (ret) {
7673                 dev_err(&hdev->pdev->dev,
7674                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7675                         ret);
7676                 return ret;
7677         }
7678
7679         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7680         retval = le16_to_cpu(desc.retval);
7681
7682         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7683 }
7684
7685 static int init_mgr_tbl(struct hclge_dev *hdev)
7686 {
7687         int ret;
7688         int i;
7689
7690         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7691                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7692                 if (ret) {
7693                         dev_err(&hdev->pdev->dev,
7694                                 "add mac ethertype failed, ret =%d.\n",
7695                                 ret);
7696                         return ret;
7697                 }
7698         }
7699
7700         return 0;
7701 }
7702
7703 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7704 {
7705         struct hclge_vport *vport = hclge_get_vport(handle);
7706         struct hclge_dev *hdev = vport->back;
7707
7708         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7709 }
7710
7711 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7712                               bool is_first)
7713 {
7714         const unsigned char *new_addr = (const unsigned char *)p;
7715         struct hclge_vport *vport = hclge_get_vport(handle);
7716         struct hclge_dev *hdev = vport->back;
7717         int ret;
7718
7719         /* mac addr check */
7720         if (is_zero_ether_addr(new_addr) ||
7721             is_broadcast_ether_addr(new_addr) ||
7722             is_multicast_ether_addr(new_addr)) {
7723                 dev_err(&hdev->pdev->dev,
7724                         "Change uc mac err! invalid mac:%pM.\n",
7725                          new_addr);
7726                 return -EINVAL;
7727         }
7728
7729         if ((!is_first || is_kdump_kernel()) &&
7730             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7731                 dev_warn(&hdev->pdev->dev,
7732                          "remove old uc mac address fail.\n");
7733
7734         ret = hclge_add_uc_addr(handle, new_addr);
7735         if (ret) {
7736                 dev_err(&hdev->pdev->dev,
7737                         "add uc mac address fail, ret =%d.\n",
7738                         ret);
7739
7740                 if (!is_first &&
7741                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7742                         dev_err(&hdev->pdev->dev,
7743                                 "restore uc mac address fail.\n");
7744
7745                 return -EIO;
7746         }
7747
7748         ret = hclge_pause_addr_cfg(hdev, new_addr);
7749         if (ret) {
7750                 dev_err(&hdev->pdev->dev,
7751                         "configure mac pause address fail, ret =%d.\n",
7752                         ret);
7753                 return -EIO;
7754         }
7755
7756         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7757
7758         return 0;
7759 }
7760
7761 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7762                           int cmd)
7763 {
7764         struct hclge_vport *vport = hclge_get_vport(handle);
7765         struct hclge_dev *hdev = vport->back;
7766
7767         if (!hdev->hw.mac.phydev)
7768                 return -EOPNOTSUPP;
7769
7770         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7771 }
7772
7773 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7774                                       u8 fe_type, bool filter_en, u8 vf_id)
7775 {
7776         struct hclge_vlan_filter_ctrl_cmd *req;
7777         struct hclge_desc desc;
7778         int ret;
7779
7780         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7781
7782         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7783         req->vlan_type = vlan_type;
7784         req->vlan_fe = filter_en ? fe_type : 0;
7785         req->vf_id = vf_id;
7786
7787         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7788         if (ret)
7789                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7790                         ret);
7791
7792         return ret;
7793 }
7794
7795 #define HCLGE_FILTER_TYPE_VF            0
7796 #define HCLGE_FILTER_TYPE_PORT          1
7797 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7798 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7799 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7800 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7801 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7802 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7803                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7804 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7805                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7806
7807 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7808 {
7809         struct hclge_vport *vport = hclge_get_vport(handle);
7810         struct hclge_dev *hdev = vport->back;
7811
7812         if (hdev->pdev->revision >= 0x21) {
7813                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7814                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7815                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7816                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7817         } else {
7818                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7819                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7820                                            0);
7821         }
7822         if (enable)
7823                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7824         else
7825                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7826 }
7827
7828 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7829                                     bool is_kill, u16 vlan,
7830                                     __be16 proto)
7831 {
7832         struct hclge_vport *vport = &hdev->vport[vfid];
7833         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7834         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7835         struct hclge_desc desc[2];
7836         u8 vf_byte_val;
7837         u8 vf_byte_off;
7838         int ret;
7839
7840         /* If the vf vlan table is full, firmware will close the vf vlan
7841          * filter, so it is pointless to add a new vlan id to it. If spoof
7842          * check is enabled and the vf vlan table is full, a new vlan must not
7843          * be added, because tx packets with this vlan id would be dropped.
7844          */
7845         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7846                 if (vport->vf_info.spoofchk && vlan) {
7847                         dev_err(&hdev->pdev->dev,
7848                                 "Can't add vlan because spoof check is on and vf vlan table is full\n");
7849                         return -EPERM;
7850                 }
7851                 return 0;
7852         }
7853
7854         hclge_cmd_setup_basic_desc(&desc[0],
7855                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7856         hclge_cmd_setup_basic_desc(&desc[1],
7857                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7858
7859         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7860
7861         vf_byte_off = vfid / 8;
7862         vf_byte_val = 1 << (vfid % 8);
7863
7864         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7865         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7866
7867         req0->vlan_id  = cpu_to_le16(vlan);
7868         req0->vlan_cfg = is_kill;
7869
7870         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7871                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7872         else
7873                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7874
7875         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7876         if (ret) {
7877                 dev_err(&hdev->pdev->dev,
7878                         "Send vf vlan command fail, ret =%d.\n",
7879                         ret);
7880                 return ret;
7881         }
7882
7883         if (!is_kill) {
7884 #define HCLGE_VF_VLAN_NO_ENTRY  2
7885                 if (!req0->resp_code || req0->resp_code == 1)
7886                         return 0;
7887
7888                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7889                         set_bit(vfid, hdev->vf_vlan_full);
7890                         dev_warn(&hdev->pdev->dev,
7891                                  "vf vlan table is full, vf vlan filter is disabled\n");
7892                         return 0;
7893                 }
7894
7895                 dev_err(&hdev->pdev->dev,
7896                         "Add vf vlan filter fail, ret =%u.\n",
7897                         req0->resp_code);
7898         } else {
7899 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7900                 if (!req0->resp_code)
7901                         return 0;
7902
7903                 /* The vf vlan filter is disabled when the vf vlan table is
7904                  * full, so new vlan ids are not added into the vf vlan table.
7905                  * Just return 0 without a warning, to avoid massive verbose
7906                  * print logs on unload.
7907                  */
7908                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7909                         return 0;
7910
7911                 dev_err(&hdev->pdev->dev,
7912                         "Kill vf vlan filter fail, ret =%u.\n",
7913                         req0->resp_code);
7914         }
7915
7916         return -EIO;
7917 }
7918
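     /* Program the port-level VLAN filter: the VLAN id is mapped to a
      * descriptor offset (vlan_offset_160), a byte inside the bitmap and a
      * bit inside that byte, and vlan_cfg tells firmware whether the VLAN
      * is being added or killed.
      */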
7919 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7920                                       u16 vlan_id, bool is_kill)
7921 {
7922         struct hclge_vlan_filter_pf_cfg_cmd *req;
7923         struct hclge_desc desc;
7924         u8 vlan_offset_byte_val;
7925         u8 vlan_offset_byte;
7926         u8 vlan_offset_160;
7927         int ret;
7928
7929         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7930
7931         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7932         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7933                            HCLGE_VLAN_BYTE_SIZE;
7934         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7935
7936         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7937         req->vlan_offset = vlan_offset_160;
7938         req->vlan_cfg = is_kill;
7939         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7940
7941         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7942         if (ret)
7943                 dev_err(&hdev->pdev->dev,
7944                         "port vlan command, send fail, ret =%d.\n", ret);
7945         return ret;
7946 }
7947
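     /* Update the per-vport (VF) VLAN filter first, then touch the
      * port-level filter only when the first vport joins the VLAN or the
      * last vport leaves it; killing vlan 0 is silently skipped.
      */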
7948 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7949                                     u16 vport_id, u16 vlan_id,
7950                                     bool is_kill)
7951 {
7952         u16 vport_idx, vport_num = 0;
7953         int ret;
7954
7955         if (is_kill && !vlan_id)
7956                 return 0;
7957
7958         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7959                                        proto);
7960         if (ret) {
7961                 dev_err(&hdev->pdev->dev,
7962                         "Set %u vport vlan filter config fail, ret =%d.\n",
7963                         vport_id, ret);
7964                 return ret;
7965         }
7966
7967         /* vlan 0 may be added twice when 8021q module is enabled */
7968         if (!is_kill && !vlan_id &&
7969             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7970                 return 0;
7971
7972         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7973                 dev_err(&hdev->pdev->dev,
7974                         "Add port vlan failed, vport %u is already in vlan %u\n",
7975                         vport_id, vlan_id);
7976                 return -EINVAL;
7977         }
7978
7979         if (is_kill &&
7980             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7981                 dev_err(&hdev->pdev->dev,
7982                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7983                         vport_id, vlan_id);
7984                 return -EINVAL;
7985         }
7986
7987         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7988                 vport_num++;
7989
7990         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7991                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7992                                                  is_kill);
7993
7994         return ret;
7995 }
7996
7997 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7998 {
7999         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8000         struct hclge_vport_vtag_tx_cfg_cmd *req;
8001         struct hclge_dev *hdev = vport->back;
8002         struct hclge_desc desc;
8003         u16 bmap_index;
8004         int status;
8005
8006         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8007
8008         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8009         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8010         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8011         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8012                       vcfg->accept_tag1 ? 1 : 0);
8013         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8014                       vcfg->accept_untag1 ? 1 : 0);
8015         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8016                       vcfg->accept_tag2 ? 1 : 0);
8017         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8018                       vcfg->accept_untag2 ? 1 : 0);
8019         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8020                       vcfg->insert_tag1_en ? 1 : 0);
8021         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8022                       vcfg->insert_tag2_en ? 1 : 0);
8023         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8024
8025         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8026         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8027                         HCLGE_VF_NUM_PER_BYTE;
8028         req->vf_bitmap[bmap_index] =
8029                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8030
8031         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8032         if (status)
8033                 dev_err(&hdev->pdev->dev,
8034                         "Send port txvlan cfg command fail, ret =%d\n",
8035                         status);
8036
8037         return status;
8038 }
8039
8040 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8041 {
8042         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8043         struct hclge_vport_vtag_rx_cfg_cmd *req;
8044         struct hclge_dev *hdev = vport->back;
8045         struct hclge_desc desc;
8046         u16 bmap_index;
8047         int status;
8048
8049         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8050
8051         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8052         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8053                       vcfg->strip_tag1_en ? 1 : 0);
8054         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8055                       vcfg->strip_tag2_en ? 1 : 0);
8056         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8057                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8058         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8059                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8060
8061         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8062         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8063                         HCLGE_VF_NUM_PER_BYTE;
8064         req->vf_bitmap[bmap_index] =
8065                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8066
8067         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8068         if (status)
8069                 dev_err(&hdev->pdev->dev,
8070                         "Send port rxvlan cfg command fail, ret =%d\n",
8071                         status);
8072
8073         return status;
8074 }
8075
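     /* Configure TX and RX VLAN offload for a vport according to its
      * port-based VLAN state: when disabled, tag1 from the stack is
      * accepted on TX and only tag2 stripping follows the RX offload
      * setting; when enabled, @vlan_tag is inserted as the default tag1
      * on TX and tag2 is always stripped on RX.
      */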
8076 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8077                                   u16 port_base_vlan_state,
8078                                   u16 vlan_tag)
8079 {
8080         int ret;
8081
8082         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8083                 vport->txvlan_cfg.accept_tag1 = true;
8084                 vport->txvlan_cfg.insert_tag1_en = false;
8085                 vport->txvlan_cfg.default_tag1 = 0;
8086         } else {
8087                 vport->txvlan_cfg.accept_tag1 = false;
8088                 vport->txvlan_cfg.insert_tag1_en = true;
8089                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8090         }
8091
8092         vport->txvlan_cfg.accept_untag1 = true;
8093
8094         /* accept_tag2 and accept_untag2 are not supported on
8095          * pdev revision(0x20); newer revisions support them. These
8096          * two fields cannot be configured by the user.
8097          */
8098         vport->txvlan_cfg.accept_tag2 = true;
8099         vport->txvlan_cfg.accept_untag2 = true;
8100         vport->txvlan_cfg.insert_tag2_en = false;
8101         vport->txvlan_cfg.default_tag2 = 0;
8102
8103         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8104                 vport->rxvlan_cfg.strip_tag1_en = false;
8105                 vport->rxvlan_cfg.strip_tag2_en =
8106                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8107         } else {
8108                 vport->rxvlan_cfg.strip_tag1_en =
8109                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8110                 vport->rxvlan_cfg.strip_tag2_en = true;
8111         }
8112         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8113         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8114
8115         ret = hclge_set_vlan_tx_offload_cfg(vport);
8116         if (ret)
8117                 return ret;
8118
8119         return hclge_set_vlan_rx_offload_cfg(vport);
8120 }
8121
8122 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8123 {
8124         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8125         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8126         struct hclge_desc desc;
8127         int status;
8128
8129         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8130         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8131         rx_req->ot_fst_vlan_type =
8132                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8133         rx_req->ot_sec_vlan_type =
8134                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8135         rx_req->in_fst_vlan_type =
8136                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8137         rx_req->in_sec_vlan_type =
8138                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8139
8140         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8141         if (status) {
8142                 dev_err(&hdev->pdev->dev,
8143                         "Send rxvlan protocol type command fail, ret =%d\n",
8144                         status);
8145                 return status;
8146         }
8147
8148         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8149
8150         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8151         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8152         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8153
8154         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8155         if (status)
8156                 dev_err(&hdev->pdev->dev,
8157                         "Send txvlan protocol type command fail, ret =%d\n",
8158                         status);
8159
8160         return status;
8161 }
8162
8163 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8164 {
8165 #define HCLGE_DEF_VLAN_TYPE             0x8100
8166
8167         struct hnae3_handle *handle = &hdev->vport[0].nic;
8168         struct hclge_vport *vport;
8169         int ret;
8170         int i;
8171
8172         if (hdev->pdev->revision >= 0x21) {
8173                 /* for revision 0x21, vf vlan filter is per function */
8174                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8175                         vport = &hdev->vport[i];
8176                         ret = hclge_set_vlan_filter_ctrl(hdev,
8177                                                          HCLGE_FILTER_TYPE_VF,
8178                                                          HCLGE_FILTER_FE_EGRESS,
8179                                                          true,
8180                                                          vport->vport_id);
8181                         if (ret)
8182                                 return ret;
8183                 }
8184
8185                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8186                                                  HCLGE_FILTER_FE_INGRESS, true,
8187                                                  0);
8188                 if (ret)
8189                         return ret;
8190         } else {
8191                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8192                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8193                                                  true, 0);
8194                 if (ret)
8195                         return ret;
8196         }
8197
8198         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8199
8200         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8201         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8202         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8203         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8204         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8205         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8206
8207         ret = hclge_set_vlan_protocol_type(hdev);
8208         if (ret)
8209                 return ret;
8210
8211         for (i = 0; i < hdev->num_alloc_vport; i++) {
8212                 u16 vlan_tag;
8213
8214                 vport = &hdev->vport[i];
8215                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8216
8217                 ret = hclge_vlan_offload_cfg(vport,
8218                                              vport->port_base_vlan_cfg.state,
8219                                              vlan_tag);
8220                 if (ret)
8221                         return ret;
8222         }
8223
8224         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8225 }
8226
8227 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8228                                        bool writen_to_tbl)
8229 {
8230         struct hclge_vport_vlan_cfg *vlan;
8231
8232         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8233         if (!vlan)
8234                 return;
8235
8236         vlan->hd_tbl_status = writen_to_tbl;
8237         vlan->vlan_id = vlan_id;
8238
8239         list_add_tail(&vlan->node, &vport->vlan_list);
8240 }
8241
8242 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8243 {
8244         struct hclge_vport_vlan_cfg *vlan, *tmp;
8245         struct hclge_dev *hdev = vport->back;
8246         int ret;
8247
8248         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8249                 if (!vlan->hd_tbl_status) {
8250                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8251                                                        vport->vport_id,
8252                                                        vlan->vlan_id, false);
8253                         if (ret) {
8254                                 dev_err(&hdev->pdev->dev,
8255                                         "restore vport vlan list failed, ret=%d\n",
8256                                         ret);
8257                                 return ret;
8258                         }
8259                 }
8260                 vlan->hd_tbl_status = true;
8261         }
8262
8263         return 0;
8264 }
8265
8266 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8267                                       bool is_write_tbl)
8268 {
8269         struct hclge_vport_vlan_cfg *vlan, *tmp;
8270         struct hclge_dev *hdev = vport->back;
8271
8272         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8273                 if (vlan->vlan_id == vlan_id) {
8274                         if (is_write_tbl && vlan->hd_tbl_status)
8275                                 hclge_set_vlan_filter_hw(hdev,
8276                                                          htons(ETH_P_8021Q),
8277                                                          vport->vport_id,
8278                                                          vlan_id,
8279                                                          true);
8280
8281                         list_del(&vlan->node);
8282                         kfree(vlan);
8283                         break;
8284                 }
8285         }
8286 }
8287
8288 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8289 {
8290         struct hclge_vport_vlan_cfg *vlan, *tmp;
8291         struct hclge_dev *hdev = vport->back;
8292
8293         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8294                 if (vlan->hd_tbl_status)
8295                         hclge_set_vlan_filter_hw(hdev,
8296                                                  htons(ETH_P_8021Q),
8297                                                  vport->vport_id,
8298                                                  vlan->vlan_id,
8299                                                  true);
8300
8301                 vlan->hd_tbl_status = false;
8302                 if (is_del_list) {
8303                         list_del(&vlan->node);
8304                         kfree(vlan);
8305                 }
8306         }
8307 }
8308
8309 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8310 {
8311         struct hclge_vport_vlan_cfg *vlan, *tmp;
8312         struct hclge_vport *vport;
8313         int i;
8314
8315         for (i = 0; i < hdev->num_alloc_vport; i++) {
8316                 vport = &hdev->vport[i];
8317                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8318                         list_del(&vlan->node);
8319                         kfree(vlan);
8320                 }
8321         }
8322 }
8323
8324 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8325 {
8326         struct hclge_vport *vport = hclge_get_vport(handle);
8327         struct hclge_vport_vlan_cfg *vlan, *tmp;
8328         struct hclge_dev *hdev = vport->back;
8329         u16 vlan_proto;
8330         u16 state, vlan_id;
8331         int i;
8332
8333         for (i = 0; i < hdev->num_alloc_vport; i++) {
8334                 vport = &hdev->vport[i];
8335                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8336                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8337                 state = vport->port_base_vlan_cfg.state;
8338
8339                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8340                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8341                                                  vport->vport_id, vlan_id,
8342                                                  false);
8343                         continue;
8344                 }
8345
8346                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8347                         int ret;
8348
8349                         if (!vlan->hd_tbl_status)
8350                                 continue;
8351                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8352                                                        vport->vport_id,
8353                                                        vlan->vlan_id, false);
8354                         if (ret)
8355                                 break;
8356                 }
8357         }
8358 }
8359
8360 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8361 {
8362         struct hclge_vport *vport = hclge_get_vport(handle);
8363
8364         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8365                 vport->rxvlan_cfg.strip_tag1_en = false;
8366                 vport->rxvlan_cfg.strip_tag2_en = enable;
8367         } else {
8368                 vport->rxvlan_cfg.strip_tag1_en = enable;
8369                 vport->rxvlan_cfg.strip_tag2_en = true;
8370         }
8371         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8372         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8373         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8374
8375         return hclge_set_vlan_rx_offload_cfg(vport);
8376 }
8377
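     /* Switch the hardware filter entries when port based VLAN is toggled:
      * on enable, drop the per-vport entries and install the new port VLAN;
      * on disable, remove the old port VLAN and restore the per-vport list.
      */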
8378 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8379                                             u16 port_base_vlan_state,
8380                                             struct hclge_vlan_info *new_info,
8381                                             struct hclge_vlan_info *old_info)
8382 {
8383         struct hclge_dev *hdev = vport->back;
8384         int ret;
8385
8386         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8387                 hclge_rm_vport_all_vlan_table(vport, false);
8388                 return hclge_set_vlan_filter_hw(hdev,
8389                                                  htons(new_info->vlan_proto),
8390                                                  vport->vport_id,
8391                                                  new_info->vlan_tag,
8392                                                  false);
8393         }
8394
8395         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8396                                        vport->vport_id, old_info->vlan_tag,
8397                                        true);
8398         if (ret)
8399                 return ret;
8400
8401         return hclge_add_vport_all_vlan_table(vport);
8402 }
8403
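     /* Apply a new port based VLAN configuration to a vport: program the
      * VLAN offload, update the hardware filter entries, then record the
      * new state and VLAN info in the vport.
      */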
8404 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8405                                     struct hclge_vlan_info *vlan_info)
8406 {
8407         struct hnae3_handle *nic = &vport->nic;
8408         struct hclge_vlan_info *old_vlan_info;
8409         struct hclge_dev *hdev = vport->back;
8410         int ret;
8411
8412         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8413
8414         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8415         if (ret)
8416                 return ret;
8417
8418         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8419                 /* add new VLAN tag */
8420                 ret = hclge_set_vlan_filter_hw(hdev,
8421                                                htons(vlan_info->vlan_proto),
8422                                                vport->vport_id,
8423                                                vlan_info->vlan_tag,
8424                                                false);
8425                 if (ret)
8426                         return ret;
8427
8428                 /* remove old VLAN tag */
8429                 ret = hclge_set_vlan_filter_hw(hdev,
8430                                                htons(old_vlan_info->vlan_proto),
8431                                                vport->vport_id,
8432                                                old_vlan_info->vlan_tag,
8433                                                true);
8434                 if (ret)
8435                         return ret;
8436
8437                 goto update;
8438         }
8439
8440         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8441                                                old_vlan_info);
8442         if (ret)
8443                 return ret;
8444
8445         /* update state only when disabling/enabling port based VLAN */
8446         vport->port_base_vlan_cfg.state = state;
8447         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8448                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8449         else
8450                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8451
8452 update:
8453         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8454         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8455         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8456
8457         return 0;
8458 }
8459
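     /* Work out how a requested VLAN changes the port based VLAN state:
      * no change, enable, disable or modify, depending on the current state
      * and whether the id differs from the one already configured.
      */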
8460 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8461                                           enum hnae3_port_base_vlan_state state,
8462                                           u16 vlan)
8463 {
8464         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8465                 if (!vlan)
8466                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8467                 else
8468                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8469         } else {
8470                 if (!vlan)
8471                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8472                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8473                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8474                 else
8475                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8476         }
8477 }
8478
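     /* Handle a VF VLAN request on the PF side: validate vlan/qos/proto,
      * work out the resulting port based VLAN state, then either apply it
      * directly (VF not alive) or push the new info to the VF.
      */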
8479 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8480                                     u16 vlan, u8 qos, __be16 proto)
8481 {
8482         struct hclge_vport *vport = hclge_get_vport(handle);
8483         struct hclge_dev *hdev = vport->back;
8484         struct hclge_vlan_info vlan_info;
8485         u16 state;
8486         int ret;
8487
8488         if (hdev->pdev->revision == 0x20)
8489                 return -EOPNOTSUPP;
8490
8491         vport = hclge_get_vf_vport(hdev, vfid);
8492         if (!vport)
8493                 return -EINVAL;
8494
8495         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8496         if (vlan > VLAN_N_VID - 1 || qos > 7)
8497                 return -EINVAL;
8498         if (proto != htons(ETH_P_8021Q))
8499                 return -EPROTONOSUPPORT;
8500
8501         state = hclge_get_port_base_vlan_state(vport,
8502                                                vport->port_base_vlan_cfg.state,
8503                                                vlan);
8504         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8505                 return 0;
8506
8507         vlan_info.vlan_tag = vlan;
8508         vlan_info.qos = qos;
8509         vlan_info.vlan_proto = ntohs(proto);
8510
8511         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8512                 return hclge_update_port_base_vlan_cfg(vport, state,
8513                                                        &vlan_info);
8514         } else {
8515                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8516                                                         vport->vport_id, state,
8517                                                         vlan, qos,
8518                                                         ntohs(proto));
8519                 return ret;
8520         }
8521 }
8522
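     /* Add or remove a VLAN filter entry for a vport. The hardware table is
      * only touched when no port based VLAN is active; the software vlan
      * list is updated on success, and failed removals are recorded for a
      * later retry.
      */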
8523 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8524                           u16 vlan_id, bool is_kill)
8525 {
8526         struct hclge_vport *vport = hclge_get_vport(handle);
8527         struct hclge_dev *hdev = vport->back;
8528         bool writen_to_tbl = false;
8529         int ret = 0;
8530
8531         /* When the device is resetting, the firmware is unable to handle
8532          * the mailbox. Just record the vlan id, and remove it after the
8533          * reset has finished.
8534          */
8535         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8536                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8537                 return -EBUSY;
8538         }
8539
8540         /* When port based VLAN is enabled, we use the port based VLAN as
8541          * the VLAN filter entry. In this case, we don't update the VLAN
8542          * filter table when the user adds or removes a VLAN; we only update
8543          * the vport vlan list. The VLAN ids in that list are written to the
8544          * VLAN filter table once port based VLAN is disabled.
8545          */
8546         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8547                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8548                                                vlan_id, is_kill);
8549                 writen_to_tbl = true;
8550         }
8551
8552         if (!ret) {
8553                 if (is_kill)
8554                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8555                 else
8556                         hclge_add_vport_vlan_table(vport, vlan_id,
8557                                                    writen_to_tbl);
8558         } else if (is_kill) {
8559                 /* when removing the hw vlan filter failed, record the vlan
8560                  * id and try to remove it from hw later, to stay consistent
8561                  * with the stack
8562                  */
8563                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8564         }
8565         return ret;
8566 }
8567
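     /* Retry the VLAN removals recorded as failed in vlan_del_fail_bmap,
      * bounded by HCLGE_MAX_SYNC_COUNT entries per call.
      */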
8568 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8569 {
8570 #define HCLGE_MAX_SYNC_COUNT    60
8571
8572         int i, ret, sync_cnt = 0;
8573         u16 vlan_id;
8574
8575         /* check every vport, including the PF, for failed vlan removals */
8576         for (i = 0; i < hdev->num_alloc_vport; i++) {
8577                 struct hclge_vport *vport = &hdev->vport[i];
8578
8579                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8580                                          VLAN_N_VID);
8581                 while (vlan_id != VLAN_N_VID) {
8582                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8583                                                        vport->vport_id, vlan_id,
8584                                                        true);
8585                         if (ret && ret != -EINVAL)
8586                                 return;
8587
8588                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8589                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8590
8591                         sync_cnt++;
8592                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8593                                 return;
8594
8595                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8596                                                  VLAN_N_VID);
8597                 }
8598         }
8599 }
8600
8601 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8602 {
8603         struct hclge_config_max_frm_size_cmd *req;
8604         struct hclge_desc desc;
8605
8606         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8607
8608         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8609         req->max_frm_size = cpu_to_le16(new_mps);
8610         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8611
8612         return hclge_cmd_send(&hdev->hw, &desc, 1);
8613 }
8614
8615 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8616 {
8617         struct hclge_vport *vport = hclge_get_vport(handle);
8618
8619         return hclge_set_vport_mtu(vport, new_mtu);
8620 }
8621
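     /* Set the MTU for a vport. A VF only updates its tracked mps (which
      * must fit within the PF's); the PF reprograms the MAC frame size and
      * the packet buffer allocation while the client is paused.
      */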
8622 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8623 {
8624         struct hclge_dev *hdev = vport->back;
8625         int i, max_frm_size, ret;
8626
8627         /* HW supports 2 VLAN tag layers */
8628         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8629         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8630             max_frm_size > HCLGE_MAC_MAX_FRAME)
8631                 return -EINVAL;
8632
8633         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8634         mutex_lock(&hdev->vport_lock);
8635         /* VF's mps must fit within hdev->mps */
8636         if (vport->vport_id && max_frm_size > hdev->mps) {
8637                 mutex_unlock(&hdev->vport_lock);
8638                 return -EINVAL;
8639         } else if (vport->vport_id) {
8640                 vport->mps = max_frm_size;
8641                 mutex_unlock(&hdev->vport_lock);
8642                 return 0;
8643         }
8644
8645         /* PF's mps must not be less than any VF's mps */
8646         for (i = 1; i < hdev->num_alloc_vport; i++)
8647                 if (max_frm_size < hdev->vport[i].mps) {
8648                         mutex_unlock(&hdev->vport_lock);
8649                         return -EINVAL;
8650                 }
8651
8652         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8653
8654         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8655         if (ret) {
8656                 dev_err(&hdev->pdev->dev,
8657                         "Change mtu fail, ret =%d\n", ret);
8658                 goto out;
8659         }
8660
8661         hdev->mps = max_frm_size;
8662         vport->mps = max_frm_size;
8663
8664         ret = hclge_buffer_alloc(hdev);
8665         if (ret)
8666                 dev_err(&hdev->pdev->dev,
8667                         "Allocate buffer fail, ret =%d\n", ret);
8668
8669 out:
8670         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8671         mutex_unlock(&hdev->vport_lock);
8672         return ret;
8673 }
8674
8675 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8676                                     bool enable)
8677 {
8678         struct hclge_reset_tqp_queue_cmd *req;
8679         struct hclge_desc desc;
8680         int ret;
8681
8682         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8683
8684         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8685         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8686         if (enable)
8687                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8688
8689         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8690         if (ret) {
8691                 dev_err(&hdev->pdev->dev,
8692                         "Send tqp reset cmd error, status =%d\n", ret);
8693                 return ret;
8694         }
8695
8696         return 0;
8697 }
8698
8699 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8700 {
8701         struct hclge_reset_tqp_queue_cmd *req;
8702         struct hclge_desc desc;
8703         int ret;
8704
8705         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8706
8707         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8708         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8709
8710         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8711         if (ret) {
8712                 dev_err(&hdev->pdev->dev,
8713                         "Get reset status error, status =%d\n", ret);
8714                 return ret;
8715         }
8716
8717         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8718 }
8719
8720 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8721 {
8722         struct hnae3_queue *queue;
8723         struct hclge_tqp *tqp;
8724
8725         queue = handle->kinfo.tqp[queue_id];
8726         tqp = container_of(queue, struct hclge_tqp, q);
8727
8728         return tqp->index;
8729 }
8730
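     /* Reset a single TQP: disable the queue, request the reset through the
      * firmware, poll for the reset status, then deassert the reset.
      */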
8731 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8732 {
8733         struct hclge_vport *vport = hclge_get_vport(handle);
8734         struct hclge_dev *hdev = vport->back;
8735         int reset_try_times = 0;
8736         int reset_status;
8737         u16 queue_gid;
8738         int ret;
8739
8740         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8741
8742         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8743         if (ret) {
8744                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8745                 return ret;
8746         }
8747
8748         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8749         if (ret) {
8750                 dev_err(&hdev->pdev->dev,
8751                         "Send reset tqp cmd fail, ret = %d\n", ret);
8752                 return ret;
8753         }
8754
8755         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8756                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8757                 if (reset_status)
8758                         break;
8759
8760                 /* Wait for tqp hw reset */
8761                 usleep_range(1000, 1200);
8762         }
8763
8764         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8765                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8766                 return -ETIME;
8767         }
8768
8769         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8770         if (ret)
8771                 dev_err(&hdev->pdev->dev,
8772                         "Deassert the soft reset fail, ret = %d\n", ret);
8773
8774         return ret;
8775 }
8776
8777 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8778 {
8779         struct hclge_dev *hdev = vport->back;
8780         int reset_try_times = 0;
8781         int reset_status;
8782         u16 queue_gid;
8783         int ret;
8784
8785         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8786
8787         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8788         if (ret) {
8789                 dev_warn(&hdev->pdev->dev,
8790                          "Send reset tqp cmd fail, ret = %d\n", ret);
8791                 return;
8792         }
8793
8794         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8795                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8796                 if (reset_status)
8797                         break;
8798
8799                 /* Wait for tqp hw reset */
8800                 usleep_range(1000, 1200);
8801         }
8802
8803         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8804                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8805                 return;
8806         }
8807
8808         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8809         if (ret)
8810                 dev_warn(&hdev->pdev->dev,
8811                          "Deassert the soft reset fail, ret = %d\n", ret);
8812 }
8813
8814 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8815 {
8816         struct hclge_vport *vport = hclge_get_vport(handle);
8817         struct hclge_dev *hdev = vport->back;
8818
8819         return hdev->fw_version;
8820 }
8821
8822 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8823 {
8824         struct phy_device *phydev = hdev->hw.mac.phydev;
8825
8826         if (!phydev)
8827                 return;
8828
8829         phy_set_asym_pause(phydev, rx_en, tx_en);
8830 }
8831
8832 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8833 {
8834         int ret;
8835
8836         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8837                 return 0;
8838
8839         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8840         if (ret)
8841                 dev_err(&hdev->pdev->dev,
8842                         "configure pauseparam error, ret = %d.\n", ret);
8843
8844         return ret;
8845 }
8846
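     /* Resolve the pause configuration from the PHY autoneg result (local
      * and link partner advertisements) and program it into the MAC; pause
      * is forced off on half duplex links.
      */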
8847 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8848 {
8849         struct phy_device *phydev = hdev->hw.mac.phydev;
8850         u16 remote_advertising = 0;
8851         u16 local_advertising;
8852         u32 rx_pause, tx_pause;
8853         u8 flowctl;
8854
8855         if (!phydev->link || !phydev->autoneg)
8856                 return 0;
8857
8858         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8859
8860         if (phydev->pause)
8861                 remote_advertising = LPA_PAUSE_CAP;
8862
8863         if (phydev->asym_pause)
8864                 remote_advertising |= LPA_PAUSE_ASYM;
8865
8866         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8867                                            remote_advertising);
8868         tx_pause = flowctl & FLOW_CTRL_TX;
8869         rx_pause = flowctl & FLOW_CTRL_RX;
8870
8871         if (phydev->duplex == HCLGE_MAC_HALF) {
8872                 tx_pause = 0;
8873                 rx_pause = 0;
8874         }
8875
8876         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8877 }
8878
8879 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8880                                  u32 *rx_en, u32 *tx_en)
8881 {
8882         struct hclge_vport *vport = hclge_get_vport(handle);
8883         struct hclge_dev *hdev = vport->back;
8884         struct phy_device *phydev = hdev->hw.mac.phydev;
8885
8886         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8887
8888         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8889                 *rx_en = 0;
8890                 *tx_en = 0;
8891                 return;
8892         }
8893
8894         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8895                 *rx_en = 1;
8896                 *tx_en = 0;
8897         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8898                 *tx_en = 1;
8899                 *rx_en = 0;
8900         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8901                 *rx_en = 1;
8902                 *tx_en = 1;
8903         } else {
8904                 *rx_en = 0;
8905                 *tx_en = 0;
8906         }
8907 }
8908
8909 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8910                                          u32 rx_en, u32 tx_en)
8911 {
8912         if (rx_en && tx_en)
8913                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8914         else if (rx_en && !tx_en)
8915                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8916         else if (!rx_en && tx_en)
8917                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8918         else
8919                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8920
8921         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8922 }
8923
8924 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8925                                 u32 rx_en, u32 tx_en)
8926 {
8927         struct hclge_vport *vport = hclge_get_vport(handle);
8928         struct hclge_dev *hdev = vport->back;
8929         struct phy_device *phydev = hdev->hw.mac.phydev;
8930         u32 fc_autoneg;
8931
8932         if (phydev) {
8933                 fc_autoneg = hclge_get_autoneg(handle);
8934                 if (auto_neg != fc_autoneg) {
8935                         dev_info(&hdev->pdev->dev,
8936                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8937                         return -EOPNOTSUPP;
8938                 }
8939         }
8940
8941         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8942                 dev_info(&hdev->pdev->dev,
8943                          "Priority flow control enabled. Cannot set link flow control.\n");
8944                 return -EOPNOTSUPP;
8945         }
8946
8947         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8948
8949         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8950
8951         if (!auto_neg)
8952                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8953
8954         if (phydev)
8955                 return phy_start_aneg(phydev);
8956
8957         return -EOPNOTSUPP;
8958 }
8959
8960 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8961                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8962 {
8963         struct hclge_vport *vport = hclge_get_vport(handle);
8964         struct hclge_dev *hdev = vport->back;
8965
8966         if (speed)
8967                 *speed = hdev->hw.mac.speed;
8968         if (duplex)
8969                 *duplex = hdev->hw.mac.duplex;
8970         if (auto_neg)
8971                 *auto_neg = hdev->hw.mac.autoneg;
8972 }
8973
8974 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8975                                  u8 *module_type)
8976 {
8977         struct hclge_vport *vport = hclge_get_vport(handle);
8978         struct hclge_dev *hdev = vport->back;
8979
8980         if (media_type)
8981                 *media_type = hdev->hw.mac.media_type;
8982
8983         if (module_type)
8984                 *module_type = hdev->hw.mac.module_type;
8985 }
8986
8987 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8988                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8989 {
8990         struct hclge_vport *vport = hclge_get_vport(handle);
8991         struct hclge_dev *hdev = vport->back;
8992         struct phy_device *phydev = hdev->hw.mac.phydev;
8993         int mdix_ctrl, mdix, is_resolved;
8994         unsigned int retval;
8995
8996         if (!phydev) {
8997                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8998                 *tp_mdix = ETH_TP_MDI_INVALID;
8999                 return;
9000         }
9001
9002         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9003
9004         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9005         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9006                                     HCLGE_PHY_MDIX_CTRL_S);
9007
9008         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9009         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9010         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9011
9012         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9013
9014         switch (mdix_ctrl) {
9015         case 0x0:
9016                 *tp_mdix_ctrl = ETH_TP_MDI;
9017                 break;
9018         case 0x1:
9019                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9020                 break;
9021         case 0x3:
9022                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9023                 break;
9024         default:
9025                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9026                 break;
9027         }
9028
9029         if (!is_resolved)
9030                 *tp_mdix = ETH_TP_MDI_INVALID;
9031         else if (mdix)
9032                 *tp_mdix = ETH_TP_MDI_X;
9033         else
9034                 *tp_mdix = ETH_TP_MDI;
9035 }
9036
9037 static void hclge_info_show(struct hclge_dev *hdev)
9038 {
9039         struct device *dev = &hdev->pdev->dev;
9040
9041         dev_info(dev, "PF info begin:\n");
9042
9043         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9044         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9045         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9046         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9047         dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
9048         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9049         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9050         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9051         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9052         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9053         dev_info(dev, "This is %s PF\n",
9054                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9055         dev_info(dev, "DCB %s\n",
9056                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9057         dev_info(dev, "MQPRIO %s\n",
9058                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9059
9060         dev_info(dev, "PF info end.\n");
9061 }
9062
9063 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9064                                           struct hclge_vport *vport)
9065 {
9066         struct hnae3_client *client = vport->nic.client;
9067         struct hclge_dev *hdev = ae_dev->priv;
9068         int rst_cnt = hdev->rst_stats.reset_cnt;
9069         int ret;
9070
9071         ret = client->ops->init_instance(&vport->nic);
9072         if (ret)
9073                 return ret;
9074
9075         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9076         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9077             rst_cnt != hdev->rst_stats.reset_cnt) {
9078                 ret = -EBUSY;
9079                 goto init_nic_err;
9080         }
9081
9082         /* Enable nic hw error interrupts */
9083         ret = hclge_config_nic_hw_error(hdev, true);
9084         if (ret) {
9085                 dev_err(&ae_dev->pdev->dev,
9086                         "fail(%d) to enable hw error interrupts\n", ret);
9087                 goto init_nic_err;
9088         }
9089
9090         hnae3_set_client_init_flag(client, ae_dev, 1);
9091
9092         if (netif_msg_drv(&hdev->vport->nic))
9093                 hclge_info_show(hdev);
9094
9095         return ret;
9096
9097 init_nic_err:
9098         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9099         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9100                 msleep(HCLGE_WAIT_RESET_DONE);
9101
9102         client->ops->uninit_instance(&vport->nic, 0);
9103
9104         return ret;
9105 }
9106
9107 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9108                                            struct hclge_vport *vport)
9109 {
9110         struct hnae3_client *client = vport->roce.client;
9111         struct hclge_dev *hdev = ae_dev->priv;
9112         int rst_cnt;
9113         int ret;
9114
9115         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9116             !hdev->nic_client)
9117                 return 0;
9118
9119         client = hdev->roce_client;
9120         ret = hclge_init_roce_base_info(vport);
9121         if (ret)
9122                 return ret;
9123
9124         rst_cnt = hdev->rst_stats.reset_cnt;
9125         ret = client->ops->init_instance(&vport->roce);
9126         if (ret)
9127                 return ret;
9128
9129         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9130         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9131             rst_cnt != hdev->rst_stats.reset_cnt) {
9132                 ret = -EBUSY;
9133                 goto init_roce_err;
9134         }
9135
9136         /* Enable roce ras interrupts */
9137         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9138         if (ret) {
9139                 dev_err(&ae_dev->pdev->dev,
9140                         "fail(%d) to enable roce ras interrupts\n", ret);
9141                 goto init_roce_err;
9142         }
9143
9144         hnae3_set_client_init_flag(client, ae_dev, 1);
9145
9146         return 0;
9147
9148 init_roce_err:
9149         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9150         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9151                 msleep(HCLGE_WAIT_RESET_DONE);
9152
9153         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9154
9155         return ret;
9156 }
9157
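     /* Register a client (KNIC or RoCE) on every vport and bring up its
      * instance. The RoCE instance is only initialized once both the NIC
      * and RoCE clients are present.
      */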
9158 static int hclge_init_client_instance(struct hnae3_client *client,
9159                                       struct hnae3_ae_dev *ae_dev)
9160 {
9161         struct hclge_dev *hdev = ae_dev->priv;
9162         struct hclge_vport *vport;
9163         int i, ret;
9164
9165         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9166                 vport = &hdev->vport[i];
9167
9168                 switch (client->type) {
9169                 case HNAE3_CLIENT_KNIC:
9170                         hdev->nic_client = client;
9171                         vport->nic.client = client;
9172                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9173                         if (ret)
9174                                 goto clear_nic;
9175
9176                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9177                         if (ret)
9178                                 goto clear_roce;
9179
9180                         break;
9181                 case HNAE3_CLIENT_ROCE:
9182                         if (hnae3_dev_roce_supported(hdev)) {
9183                                 hdev->roce_client = client;
9184                                 vport->roce.client = client;
9185                         }
9186
9187                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9188                         if (ret)
9189                                 goto clear_roce;
9190
9191                         break;
9192                 default:
9193                         return -EINVAL;
9194                 }
9195         }
9196
9197         return 0;
9198
9199 clear_nic:
9200         hdev->nic_client = NULL;
9201         vport->nic.client = NULL;
9202         return ret;
9203 clear_roce:
9204         hdev->roce_client = NULL;
9205         vport->roce.client = NULL;
9206         return ret;
9207 }
9208
9209 static void hclge_uninit_client_instance(struct hnae3_client *client,
9210                                          struct hnae3_ae_dev *ae_dev)
9211 {
9212         struct hclge_dev *hdev = ae_dev->priv;
9213         struct hclge_vport *vport;
9214         int i;
9215
9216         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9217                 vport = &hdev->vport[i];
9218                 if (hdev->roce_client) {
9219                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9220                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9221                                 msleep(HCLGE_WAIT_RESET_DONE);
9222
9223                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9224                                                                 0);
9225                         hdev->roce_client = NULL;
9226                         vport->roce.client = NULL;
9227                 }
9228                 if (client->type == HNAE3_CLIENT_ROCE)
9229                         return;
9230                 if (hdev->nic_client && client->ops->uninit_instance) {
9231                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9232                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9233                                 msleep(HCLGE_WAIT_RESET_DONE);
9234
9235                         client->ops->uninit_instance(&vport->nic, 0);
9236                         hdev->nic_client = NULL;
9237                         vport->nic.client = NULL;
9238                 }
9239         }
9240 }
9241
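     /* Enable the PCI device, set the DMA mask (64 bit with a 32 bit
      * fallback), map BAR2 for register access and read the number of VFs
      * the device supports.
      */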
9242 static int hclge_pci_init(struct hclge_dev *hdev)
9243 {
9244         struct pci_dev *pdev = hdev->pdev;
9245         struct hclge_hw *hw;
9246         int ret;
9247
9248         ret = pci_enable_device(pdev);
9249         if (ret) {
9250                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9251                 return ret;
9252         }
9253
9254         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9255         if (ret) {
9256                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9257                 if (ret) {
9258                         dev_err(&pdev->dev,
9259                                 "can't set consistent PCI DMA\n");
9260                         goto err_disable_device;
9261                 }
9262                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9263         }
9264
9265         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9266         if (ret) {
9267                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9268                 goto err_disable_device;
9269         }
9270
9271         pci_set_master(pdev);
9272         hw = &hdev->hw;
9273         hw->io_base = pcim_iomap(pdev, 2, 0);
9274         if (!hw->io_base) {
9275                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9276                 ret = -ENOMEM;
9277                 goto err_clr_master;
9278         }
9279
9280         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9281
9282         return 0;
9283 err_clr_master:
9284         pci_clear_master(pdev);
9285         pci_release_regions(pdev);
9286 err_disable_device:
9287         pci_disable_device(pdev);
9288
9289         return ret;
9290 }
9291
9292 static void hclge_pci_uninit(struct hclge_dev *hdev)
9293 {
9294         struct pci_dev *pdev = hdev->pdev;
9295
9296         pcim_iounmap(pdev, hdev->hw.io_base);
9297         pci_free_irq_vectors(pdev);
9298         pci_clear_master(pdev);
9299         pci_release_mem_regions(pdev);
9300         pci_disable_device(pdev);
9301 }
9302
9303 static void hclge_state_init(struct hclge_dev *hdev)
9304 {
9305         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9306         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9307         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9308         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9309         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9310         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9311         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9312 }
9313
9314 static void hclge_state_uninit(struct hclge_dev *hdev)
9315 {
9316         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9317         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9318
9319         if (hdev->reset_timer.function)
9320                 del_timer_sync(&hdev->reset_timer);
9321         if (hdev->service_task.work.func)
9322                 cancel_delayed_work_sync(&hdev->service_task);
9323 }
9324
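     /* FLR preparation: request a function level reset and wait up to
      * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds for the reset
      * task to take the device down.
      */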
9325 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9326 {
9327 #define HCLGE_FLR_WAIT_MS       100
9328 #define HCLGE_FLR_WAIT_CNT      50
9329         struct hclge_dev *hdev = ae_dev->priv;
9330         int cnt = 0;
9331
9332         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9333         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9334         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9335         hclge_reset_event(hdev->pdev, NULL);
9336
9337         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9338                cnt++ < HCLGE_FLR_WAIT_CNT)
9339                 msleep(HCLGE_FLR_WAIT_MS);
9340
9341         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9342                 dev_err(&hdev->pdev->dev,
9343                         "flr wait down timeout: %d\n", cnt);
9344 }
9345
9346 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9347 {
9348         struct hclge_dev *hdev = ae_dev->priv;
9349
9350         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9351 }
9352
9353 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9354 {
9355         u16 i;
9356
9357         for (i = 0; i < hdev->num_alloc_vport; i++) {
9358                 struct hclge_vport *vport = &hdev->vport[i];
9359                 int ret;
9360
9361                 /* Send cmd to clear VF's FUNC_RST_ING */
9362                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9363                 if (ret)
9364                         dev_warn(&hdev->pdev->dev,
9365                                  "clear vf(%u) rst failed %d!\n",
9366                                  vport->vport_id, ret);
9367         }
9368 }
9369
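     /* Main PF initialization path: bring up PCI, the command queue,
      * interrupts, TQPs/vports, MAC, VLAN, TM, RSS and the flow director,
      * then arm the reset timer and schedule the service task.
      */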
9370 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9371 {
9372         struct pci_dev *pdev = ae_dev->pdev;
9373         struct hclge_dev *hdev;
9374         int ret;
9375
9376         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9377         if (!hdev) {
9378                 ret = -ENOMEM;
9379                 goto out;
9380         }
9381
9382         hdev->pdev = pdev;
9383         hdev->ae_dev = ae_dev;
9384         hdev->reset_type = HNAE3_NONE_RESET;
9385         hdev->reset_level = HNAE3_FUNC_RESET;
9386         ae_dev->priv = hdev;
9387
9388         /* HW supports 2 VLAN tag layers */
9389         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9390
9391         mutex_init(&hdev->vport_lock);
9392         spin_lock_init(&hdev->fd_rule_lock);
9393
9394         ret = hclge_pci_init(hdev);
9395         if (ret) {
9396                 dev_err(&pdev->dev, "PCI init failed\n");
9397                 goto out;
9398         }
9399
9400         /* Initialize the firmware command queue */
9401         ret = hclge_cmd_queue_init(hdev);
9402         if (ret) {
9403                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9404                 goto err_pci_uninit;
9405         }
9406
9407         /* Initialize firmware command handling */
9408         ret = hclge_cmd_init(hdev);
9409         if (ret)
9410                 goto err_cmd_uninit;
9411
9412         ret = hclge_get_cap(hdev);
9413         if (ret) {
9414                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9415                         ret);
9416                 goto err_cmd_uninit;
9417         }
9418
9419         ret = hclge_configure(hdev);
9420         if (ret) {
9421                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9422                 goto err_cmd_uninit;
9423         }
9424
9425         ret = hclge_init_msi(hdev);
9426         if (ret) {
9427                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9428                 goto err_cmd_uninit;
9429         }
9430
9431         ret = hclge_misc_irq_init(hdev);
9432         if (ret) {
9433                 dev_err(&pdev->dev,
9434                         "Misc IRQ(vector0) init error, ret = %d.\n",
9435                         ret);
9436                 goto err_msi_uninit;
9437         }
9438
9439         ret = hclge_alloc_tqps(hdev);
9440         if (ret) {
9441                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9442                 goto err_msi_irq_uninit;
9443         }
9444
9445         ret = hclge_alloc_vport(hdev);
9446         if (ret) {
9447                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9448                 goto err_msi_irq_uninit;
9449         }
9450
9451         ret = hclge_map_tqp(hdev);
9452         if (ret) {
9453                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9454                 goto err_msi_irq_uninit;
9455         }
9456
9457         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9458                 ret = hclge_mac_mdio_config(hdev);
9459                 if (ret) {
9460                         dev_err(&hdev->pdev->dev,
9461                                 "mdio config fail ret=%d\n", ret);
9462                         goto err_msi_irq_uninit;
9463                 }
9464         }
9465
9466         ret = hclge_init_umv_space(hdev);
9467         if (ret) {
9468                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9469                 goto err_mdiobus_unreg;
9470         }
9471
9472         ret = hclge_mac_init(hdev);
9473         if (ret) {
9474                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9475                 goto err_mdiobus_unreg;
9476         }
9477
9478         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9479         if (ret) {
9480                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9481                 goto err_mdiobus_unreg;
9482         }
9483
9484         ret = hclge_config_gro(hdev, true);
9485         if (ret)
9486                 goto err_mdiobus_unreg;
9487
9488         ret = hclge_init_vlan_config(hdev);
9489         if (ret) {
9490                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9491                 goto err_mdiobus_unreg;
9492         }
9493
9494         ret = hclge_tm_schd_init(hdev);
9495         if (ret) {
9496                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9497                 goto err_mdiobus_unreg;
9498         }
9499
9500         hclge_rss_init_cfg(hdev);
9501         ret = hclge_rss_init_hw(hdev);
9502         if (ret) {
9503                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9504                 goto err_mdiobus_unreg;
9505         }
9506
9507         ret = init_mgr_tbl(hdev);
9508         if (ret) {
9509                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9510                 goto err_mdiobus_unreg;
9511         }
9512
9513         ret = hclge_init_fd_config(hdev);
9514         if (ret) {
9515                 dev_err(&pdev->dev,
9516                         "fd table init fail, ret=%d\n", ret);
9517                 goto err_mdiobus_unreg;
9518         }
9519
9520         INIT_KFIFO(hdev->mac_tnl_log);
9521
9522         hclge_dcb_ops_set(hdev);
9523
9524         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9525         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9526
9527         /* Set up affinity after the service timer setup because add_timer_on
9528          * is called in the affinity notify callback.
9529          */
9530         hclge_misc_affinity_setup(hdev);
9531
9532         hclge_clear_all_event_cause(hdev);
9533         hclge_clear_resetting_state(hdev);
9534
9535         /* Log and clear the hw errors that have already occurred */
9536         hclge_handle_all_hns_hw_errors(ae_dev);
9537
9538         /* request a delayed reset for the error recovery because an immediate
9539          * global reset on a PF would affect other PFs' pending initialization
9540          */
9541         if (ae_dev->hw_err_reset_req) {
9542                 enum hnae3_reset_type reset_level;
9543
9544                 reset_level = hclge_get_reset_level(ae_dev,
9545                                                     &ae_dev->hw_err_reset_req);
9546                 hclge_set_def_reset_request(ae_dev, reset_level);
9547                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9548         }
9549
9550         /* Enable MISC vector(vector0) */
9551         hclge_enable_vector(&hdev->misc_vector, true);
9552
9553         hclge_state_init(hdev);
9554         hdev->last_reset_time = jiffies;
9555
9556         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9557                  HCLGE_DRIVER_NAME);
9558
9559         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9560
9561         return 0;
9562
9563 err_mdiobus_unreg:
9564         if (hdev->hw.mac.phydev)
9565                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9566 err_msi_irq_uninit:
9567         hclge_misc_irq_uninit(hdev);
9568 err_msi_uninit:
9569         pci_free_irq_vectors(pdev);
9570 err_cmd_uninit:
9571         hclge_cmd_uninit(hdev);
9572 err_pci_uninit:
9573         pcim_iounmap(pdev, hdev->hw.io_base);
9574         pci_clear_master(pdev);
9575         pci_release_regions(pdev);
9576         pci_disable_device(pdev);
9577 out:
9578         return ret;
9579 }
9580
9581 static void hclge_stats_clear(struct hclge_dev *hdev)
9582 {
9583         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9584 }
9585
9586 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9587 {
9588         return hclge_config_switch_param(hdev, vf, enable,
9589                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9590 }
9591
9592 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9593 {
9594         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9595                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9596                                           enable, vf);
9597 }
9598
9599 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9600 {
9601         int ret;
9602
9603         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9604         if (ret) {
9605                 dev_err(&hdev->pdev->dev,
9606                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9607                         vf, enable ? "on" : "off", ret);
9608                 return ret;
9609         }
9610
9611         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9612         if (ret)
9613                 dev_err(&hdev->pdev->dev,
9614                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9615                         vf, enable ? "on" : "off", ret);
9616
9617         return ret;
9618 }
9619
9620 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9621                                  bool enable)
9622 {
9623         struct hclge_vport *vport = hclge_get_vport(handle);
9624         struct hclge_dev *hdev = vport->back;
9625         u32 new_spoofchk = enable ? 1 : 0;
9626         int ret;
9627
9628         if (hdev->pdev->revision == 0x20)
9629                 return -EOPNOTSUPP;
9630
9631         vport = hclge_get_vf_vport(hdev, vf);
9632         if (!vport)
9633                 return -EINVAL;
9634
9635         if (vport->vf_info.spoofchk == new_spoofchk)
9636                 return 0;
9637
9638         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9639                 dev_warn(&hdev->pdev->dev,
9640                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9641                          vf);
9642         else if (enable && hclge_is_umv_space_full(vport))
9643                 dev_warn(&hdev->pdev->dev,
9644                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9645                          vf);
9646
9647         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9648         if (ret)
9649                 return ret;
9650
9651         vport->vf_info.spoofchk = new_spoofchk;
9652         return 0;
9653 }
9654
9655 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9656 {
9657         struct hclge_vport *vport = hdev->vport;
9658         int ret;
9659         int i;
9660
9661         if (hdev->pdev->revision == 0x20)
9662                 return 0;
9663
9664         /* resume the vf spoof check state after reset */
9665         for (i = 0; i < hdev->num_alloc_vport; i++) {
9666                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9667                                                vport->vf_info.spoofchk);
9668                 if (ret)
9669                         return ret;
9670
9671                 vport++;
9672         }
9673
9674         return 0;
9675 }
9676
9677 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9678 {
9679         struct hclge_vport *vport = hclge_get_vport(handle);
9680         struct hclge_dev *hdev = vport->back;
9681         u32 new_trusted = enable ? 1 : 0;
9682         bool en_bc_pmc;
9683         int ret;
9684
9685         vport = hclge_get_vf_vport(hdev, vf);
9686         if (!vport)
9687                 return -EINVAL;
9688
9689         if (vport->vf_info.trusted == new_trusted)
9690                 return 0;
9691
9692         /* Disable promisc mode for VF if it is not trusted any more. */
9693         if (!enable && vport->vf_info.promisc_enable) {
9694                 en_bc_pmc = hdev->pdev->revision != 0x20;
9695                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9696                                                    en_bc_pmc);
9697                 if (ret)
9698                         return ret;
9699                 vport->vf_info.promisc_enable = 0;
9700                 hclge_inform_vf_promisc_info(vport);
9701         }
9702
9703         vport->vf_info.trusted = new_trusted;
9704
9705         return 0;
9706 }
9707
9708 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9709 {
9710         int ret;
9711         int vf;
9712
9713         /* reset vf rate to default value */
9714         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9715                 struct hclge_vport *vport = &hdev->vport[vf];
9716
9717                 vport->vf_info.max_tx_rate = 0;
9718                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9719                 if (ret)
9720                         dev_err(&hdev->pdev->dev,
9721                                 "vf%d failed to reset to default, ret=%d\n",
9722                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9723         }
9724 }
9725
9726 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9727                                      int min_tx_rate, int max_tx_rate)
9728 {
9729         if (min_tx_rate != 0 ||
9730             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9731                 dev_err(&hdev->pdev->dev,
9732                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9733                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9734                 return -EINVAL;
9735         }
9736
9737         return 0;
9738 }
9739
9740 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9741                              int min_tx_rate, int max_tx_rate, bool force)
9742 {
9743         struct hclge_vport *vport = hclge_get_vport(handle);
9744         struct hclge_dev *hdev = vport->back;
9745         int ret;
9746
9747         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9748         if (ret)
9749                 return ret;
9750
9751         vport = hclge_get_vf_vport(hdev, vf);
9752         if (!vport)
9753                 return -EINVAL;
9754
9755         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9756                 return 0;
9757
9758         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9759         if (ret)
9760                 return ret;
9761
9762         vport->vf_info.max_tx_rate = max_tx_rate;
9763
9764         return 0;
9765 }
9766
9767 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9768 {
9769         struct hnae3_handle *handle = &hdev->vport->nic;
9770         struct hclge_vport *vport;
9771         int ret;
9772         int vf;
9773
9774         /* resume the vf max_tx_rate after reset */
9775         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9776                 vport = hclge_get_vf_vport(hdev, vf);
9777                 if (!vport)
9778                         return -EINVAL;
9779
9780                 /* zero means max rate; after reset, the firmware has already
9781                  * set it to max rate, so just continue.
9782                  */
9783                 if (!vport->vf_info.max_tx_rate)
9784                         continue;
9785
9786                 ret = hclge_set_vf_rate(handle, vf, 0,
9787                                         vport->vf_info.max_tx_rate, true);
9788                 if (ret) {
9789                         dev_err(&hdev->pdev->dev,
9790                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9791                                 vf, vport->vf_info.max_tx_rate, ret);
9792                         return ret;
9793                 }
9794         }
9795
9796         return 0;
9797 }
9798
9799 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9800 {
9801         struct hclge_vport *vport = hdev->vport;
9802         int i;
9803
9804         for (i = 0; i < hdev->num_alloc_vport; i++) {
9805                 hclge_vport_stop(vport);
9806                 vport++;
9807         }
9808 }
9809
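     /* Re-initialize the hardware after a reset: the command queue, TQP
      * mapping, MAC, VLAN, TM, RSS and flow director are reprogrammed from
      * the state already held in hdev, and the VF spoof check and rate
      * settings are restored.
      */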
9810 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9811 {
9812         struct hclge_dev *hdev = ae_dev->priv;
9813         struct pci_dev *pdev = ae_dev->pdev;
9814         int ret;
9815
9816         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9817
9818         hclge_stats_clear(hdev);
9819         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9820         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9821
9822         ret = hclge_cmd_init(hdev);
9823         if (ret) {
9824                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9825                 return ret;
9826         }
9827
9828         ret = hclge_map_tqp(hdev);
9829         if (ret) {
9830                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9831                 return ret;
9832         }
9833
9834         hclge_reset_umv_space(hdev);
9835
9836         ret = hclge_mac_init(hdev);
9837         if (ret) {
9838                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9839                 return ret;
9840         }
9841
9842         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9843         if (ret) {
9844                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9845                 return ret;
9846         }
9847
9848         ret = hclge_config_gro(hdev, true);
9849         if (ret)
9850                 return ret;
9851
9852         ret = hclge_init_vlan_config(hdev);
9853         if (ret) {
9854                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9855                 return ret;
9856         }
9857
9858         ret = hclge_tm_init_hw(hdev, true);
9859         if (ret) {
9860                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9861                 return ret;
9862         }
9863
9864         ret = hclge_rss_init_hw(hdev);
9865         if (ret) {
9866                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9867                 return ret;
9868         }
9869
9870         ret = hclge_init_fd_config(hdev);
9871         if (ret) {
9872                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9873                 return ret;
9874         }
9875
9876         /* Log and clear the hw errors that have already occurred */
9877         hclge_handle_all_hns_hw_errors(ae_dev);
9878
9879         /* Re-enable the hw error interrupts because they are
9880          * disabled during global reset.
9881          */
9882         ret = hclge_config_nic_hw_error(hdev, true);
9883         if (ret) {
9884                 dev_err(&pdev->dev,
9885                         "fail(%d) to re-enable NIC hw error interrupts\n",
9886                         ret);
9887                 return ret;
9888         }
9889
9890         if (hdev->roce_client) {
9891                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9892                 if (ret) {
9893                         dev_err(&pdev->dev,
9894                                 "fail(%d) to re-enable roce ras interrupts\n",
9895                                 ret);
9896                         return ret;
9897                 }
9898         }
9899
9900         hclge_reset_vport_state(hdev);
9901         ret = hclge_reset_vport_spoofchk(hdev);
9902         if (ret)
9903                 return ret;
9904
9905         ret = hclge_resume_vf_rate(hdev);
9906         if (ret)
9907                 return ret;
9908
9909         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9910                  HCLGE_DRIVER_NAME);
9911
9912         return 0;
9913 }
9914
9915 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9916 {
9917         struct hclge_dev *hdev = ae_dev->priv;
9918         struct hclge_mac *mac = &hdev->hw.mac;
9919
9920         hclge_reset_vf_rate(hdev);
9921         hclge_misc_affinity_teardown(hdev);
9922         hclge_state_uninit(hdev);
9923
9924         if (mac->phydev)
9925                 mdiobus_unregister(mac->mdio_bus);
9926
9927         hclge_uninit_umv_space(hdev);
9928
9929         /* Disable MISC vector(vector0) */
9930         hclge_enable_vector(&hdev->misc_vector, false);
9931         synchronize_irq(hdev->misc_vector.vector_irq);
9932
9933         /* Disable all hw interrupts */
9934         hclge_config_mac_tnl_int(hdev, false);
9935         hclge_config_nic_hw_error(hdev, false);
9936         hclge_config_rocee_ras_interrupt(hdev, false);
9937
9938         hclge_cmd_uninit(hdev);
9939         hclge_misc_irq_uninit(hdev);
9940         hclge_pci_uninit(hdev);
9941         mutex_destroy(&hdev->vport_lock);
9942         hclge_uninit_vport_mac_table(hdev);
9943         hclge_uninit_vport_vlan_table(hdev);
9944         ae_dev->priv = NULL;
9945 }
9946
9947 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9948 {
9949         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9950         struct hclge_vport *vport = hclge_get_vport(handle);
9951         struct hclge_dev *hdev = vport->back;
9952
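        /* The maximum combined channel count is limited both by the RSS
         * capability and by the TQPs available per TC on this vport.
         */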
9953         return min_t(u32, hdev->rss_size_max,
9954                      vport->alloc_tqps / kinfo->num_tc);
9955 }
9956
9957 static void hclge_get_channels(struct hnae3_handle *handle,
9958                                struct ethtool_channels *ch)
9959 {
9960         ch->max_combined = hclge_get_max_channels(handle);
9961         ch->other_count = 1;
9962         ch->max_other = 1;
9963         ch->combined_count = handle->kinfo.rss_size;
9964 }
9965
9966 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9967                                         u16 *alloc_tqps, u16 *max_rss_size)
9968 {
9969         struct hclge_vport *vport = hclge_get_vport(handle);
9970         struct hclge_dev *hdev = vport->back;
9971
9972         *alloc_tqps = vport->alloc_tqps;
9973         *max_rss_size = hdev->rss_size_max;
9974 }
9975
9976 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9977                               bool rxfh_configured)
9978 {
9979         struct hclge_vport *vport = hclge_get_vport(handle);
9980         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9981         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9982         struct hclge_dev *hdev = vport->back;
9983         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9984         u16 cur_rss_size = kinfo->rss_size;
9985         u16 cur_tqps = kinfo->num_tqps;
9986         u16 tc_valid[HCLGE_MAX_TC_NUM];
9987         u16 roundup_size;
9988         u32 *rss_indir;
9989         unsigned int i;
9990         int ret;
9991
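        /* Record the requested queue number; the TM vport map update below
         * recalculates kinfo->rss_size from it.
         */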
9992         kinfo->req_rss_size = new_tqps_num;
9993
9994         ret = hclge_tm_vport_map_update(hdev);
9995         if (ret) {
9996                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9997                 return ret;
9998         }
9999
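        /* The per-TC RSS size is programmed as a power of two, so take the
         * log2 of rss_size rounded up to the next power of two.
         */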
10000         roundup_size = roundup_pow_of_two(kinfo->rss_size);
10001         roundup_size = ilog2(roundup_size);
10002         /* Set the RSS TC mode according to the new RSS size */
10003         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10004                 tc_valid[i] = 0;
10005
10006                 if (!(hdev->hw_tc_map & BIT(i)))
10007                         continue;
10008
10009                 tc_valid[i] = 1;
10010                 tc_size[i] = roundup_size;
10011                 tc_offset[i] = kinfo->rss_size * i;
10012         }
10013         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10014         if (ret)
10015                 return ret;
10016
10017         /* RSS indirection table has been configured by the user */
10018         if (rxfh_configured)
10019                 goto out;
10020
10021         /* Reinitialize the RSS indirection table according to the new RSS size */
10022         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10023         if (!rss_indir)
10024                 return -ENOMEM;
10025
10026         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10027                 rss_indir[i] = i % kinfo->rss_size;
10028
10029         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10030         if (ret)
10031                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10032                         ret);
10033
10034         kfree(rss_indir);
10035
10036 out:
10037         if (!ret)
10038                 dev_info(&hdev->pdev->dev,
10039                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10040                          cur_rss_size, kinfo->rss_size,
10041                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10042
10043         return ret;
10044 }
10045
10046 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10047                               u32 *regs_num_64_bit)
10048 {
10049         struct hclge_desc desc;
10050         u32 total_num;
10051         int ret;
10052
10053         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10054         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10055         if (ret) {
10056                 dev_err(&hdev->pdev->dev,
10057                         "Query register number cmd failed, ret = %d.\n", ret);
10058                 return ret;
10059         }
10060
10061         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10062         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10063
10064         total_num = *regs_num_32_bit + *regs_num_64_bit;
10065         if (!total_num)
10066                 return -EINVAL;
10067
10068         return 0;
10069 }
10070
10071 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10072                                  void *data)
10073 {
10074 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10075 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10076
10077         struct hclge_desc *desc;
10078         u32 *reg_val = data;
10079         __le32 *desc_data;
10080         int nodata_num;
10081         int cmd_num;
10082         int i, k, n;
10083         int ret;
10084
10085         if (regs_num == 0)
10086                 return 0;
10087
10088         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10089         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10090                                HCLGE_32_BIT_REG_RTN_DATANUM);
10091         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10092         if (!desc)
10093                 return -ENOMEM;
10094
10095         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10096         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10097         if (ret) {
10098                 dev_err(&hdev->pdev->dev,
10099                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10100                 kfree(desc);
10101                 return ret;
10102         }
10103
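        /* The first descriptor carries the command header, so it holds
         * nodata_num fewer register values than the following descriptors.
         */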
10104         for (i = 0; i < cmd_num; i++) {
10105                 if (i == 0) {
10106                         desc_data = (__le32 *)(&desc[i].data[0]);
10107                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10108                 } else {
10109                         desc_data = (__le32 *)(&desc[i]);
10110                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10111                 }
10112                 for (k = 0; k < n; k++) {
10113                         *reg_val++ = le32_to_cpu(*desc_data++);
10114
10115                         regs_num--;
10116                         if (!regs_num)
10117                                 break;
10118                 }
10119         }
10120
10121         kfree(desc);
10122         return 0;
10123 }
10124
10125 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10126                                  void *data)
10127 {
10128 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10129 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10130
10131         struct hclge_desc *desc;
10132         u64 *reg_val = data;
10133         __le64 *desc_data;
10134         int nodata_len;
10135         int cmd_num;
10136         int i, k, n;
10137         int ret;
10138
10139         if (regs_num == 0)
10140                 return 0;
10141
10142         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10143         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10144                                HCLGE_64_BIT_REG_RTN_DATANUM);
10145         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10146         if (!desc)
10147                 return -ENOMEM;
10148
10149         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10150         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10151         if (ret) {
10152                 dev_err(&hdev->pdev->dev,
10153                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10154                 kfree(desc);
10155                 return ret;
10156         }
10157
10158         for (i = 0; i < cmd_num; i++) {
10159                 if (i == 0) {
10160                         desc_data = (__le64 *)(&desc[i].data[0]);
10161                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10162                 } else {
10163                         desc_data = (__le64 *)(&desc[i]);
10164                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10165                 }
10166                 for (k = 0; k < n; k++) {
10167                         *reg_val++ = le64_to_cpu(*desc_data++);
10168
10169                         regs_num--;
10170                         if (!regs_num)
10171                                 break;
10172                 }
10173         }
10174
10175         kfree(desc);
10176         return 0;
10177 }
10178
10179 #define MAX_SEPARATE_NUM        4
10180 #define SEPARATOR_VALUE         0xFDFCFBFA
10181 #define REG_NUM_PER_LINE        4
10182 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10183 #define REG_SEPARATOR_LINE      1
10184 #define REG_NUM_REMAIN_MASK     3
10185 #define BD_LIST_MAX_NUM         30
10186
10187 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10188 {
10189         /* prepare 4 commands to query DFX BD number */
10190         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10191         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10192         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10193         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10194         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10195         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10196         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10197
10198         return hclge_cmd_send(&hdev->hw, desc, 4);
10199 }
10200
10201 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10202                                     int *bd_num_list,
10203                                     u32 type_num)
10204 {
10205 #define HCLGE_DFX_REG_BD_NUM    4
10206
10207         u32 entries_per_desc, desc_index, index, offset, i;
10208         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10209         int ret;
10210
10211         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10212         if (ret) {
10213                 dev_err(&hdev->pdev->dev,
10214                         "Get dfx bd num fail, status is %d.\n", ret);
10215                 return ret;
10216         }
10217
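        /* The query returns one 32-bit BD count per DFX reg type; look each
         * type up by its offset within the concatenated descriptor data.
         */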
10218         entries_per_desc = ARRAY_SIZE(desc[0].data);
10219         for (i = 0; i < type_num; i++) {
10220                 offset = hclge_dfx_bd_offset_list[i];
10221                 index = offset % entries_per_desc;
10222                 desc_index = offset / entries_per_desc;
10223                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10224         }
10225
10226         return ret;
10227 }
10228
10229 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10230                                   struct hclge_desc *desc_src, int bd_num,
10231                                   enum hclge_opcode_type cmd)
10232 {
10233         struct hclge_desc *desc = desc_src;
10234         int i, ret;
10235
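        /* Chain bd_num descriptors with the NEXT flag so the firmware handles
         * them as a single multi-BD query.
         */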
10236         hclge_cmd_setup_basic_desc(desc, cmd, true);
10237         for (i = 0; i < bd_num - 1; i++) {
10238                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10239                 desc++;
10240                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10241         }
10242
10243         desc = desc_src;
10244         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10245         if (ret)
10246                 dev_err(&hdev->pdev->dev,
10247                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10248                         cmd, ret);
10249
10250         return ret;
10251 }
10252
10253 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10254                                     void *data)
10255 {
10256         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10257         struct hclge_desc *desc = desc_src;
10258         u32 *reg = data;
10259
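        /* Copy every 32-bit value out of the descriptors, then append one to
         * four SEPARATOR_VALUE words to terminate the block and keep it
         * aligned to REG_NUM_PER_LINE.
         */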
10260         entries_per_desc = ARRAY_SIZE(desc->data);
10261         reg_num = entries_per_desc * bd_num;
10262         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10263         for (i = 0; i < reg_num; i++) {
10264                 index = i % entries_per_desc;
10265                 desc_index = i / entries_per_desc;
10266                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10267         }
10268         for (i = 0; i < separator_num; i++)
10269                 *reg++ = SEPARATOR_VALUE;
10270
10271         return reg_num + separator_num;
10272 }
10273
10274 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10275 {
10276         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10277         int data_len_per_desc, data_len, bd_num, i;
10278         int bd_num_list[BD_LIST_MAX_NUM];
10279         int ret;
10280
10281         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10282         if (ret) {
10283                 dev_err(&hdev->pdev->dev,
10284                         "Get dfx reg bd num fail, status is %d.\n", ret);
10285                 return ret;
10286         }
10287
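        /* For each reg type, reserve its descriptor data length rounded to
         * whole REG_LEN_PER_LINE lines plus a trailing separator line,
         * matching what hclge_dfx_reg_fetch_data() emits.
         */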
10288         data_len_per_desc = sizeof_field(struct hclge_desc, data);
10289         *len = 0;
10290         for (i = 0; i < dfx_reg_type_num; i++) {
10291                 bd_num = bd_num_list[i];
10292                 data_len = data_len_per_desc * bd_num;
10293                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10294         }
10295
10296         return ret;
10297 }
10298
10299 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10300 {
10301         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10302         int bd_num, bd_num_max, buf_len, i;
10303         int bd_num_list[BD_LIST_MAX_NUM];
10304         struct hclge_desc *desc_src;
10305         u32 *reg = data;
10306         int ret;
10307
10308         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10309         if (ret) {
10310                 dev_err(&hdev->pdev->dev,
10311                         "Get dfx reg bd num fail, status is %d.\n", ret);
10312                 return ret;
10313         }
10314
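        /* Size one descriptor buffer for the largest BD count among all reg
         * types and reuse it for every query.
         */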
10315         bd_num_max = bd_num_list[0];
10316         for (i = 1; i < dfx_reg_type_num; i++)
10317                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10318
10319         buf_len = sizeof(*desc_src) * bd_num_max;
10320         desc_src = kzalloc(buf_len, GFP_KERNEL);
10321         if (!desc_src) {
10322                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10323                 return -ENOMEM;
10324         }
10325
10326         for (i = 0; i < dfx_reg_type_num; i++) {
10327                 bd_num = bd_num_list[i];
10328                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10329                                              hclge_dfx_reg_opcode_list[i]);
10330                 if (ret) {
10331                         dev_err(&hdev->pdev->dev,
10332                                 "Get dfx reg fail, status is %d.\n", ret);
10333                         break;
10334                 }
10335
10336                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10337         }
10338
10339         kfree(desc_src);
10340         return ret;
10341 }
10342
10343 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10344                               struct hnae3_knic_private_info *kinfo)
10345 {
10346 #define HCLGE_RING_REG_OFFSET           0x200
10347 #define HCLGE_RING_INT_REG_OFFSET       0x4
10348
10349         int i, j, reg_num, separator_num;
10350         int data_num_sum;
10351         u32 *reg = data;
10352
10353         /* fetch the per-PF register values from the PF PCIe register space */
10354         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10355         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10356         for (i = 0; i < reg_num; i++)
10357                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10358         for (i = 0; i < separator_num; i++)
10359                 *reg++ = SEPARATOR_VALUE;
10360         data_num_sum = reg_num + separator_num;
10361
10362         reg_num = ARRAY_SIZE(common_reg_addr_list);
10363         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10364         for (i = 0; i < reg_num; i++)
10365                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10366         for (i = 0; i < separator_num; i++)
10367                 *reg++ = SEPARATOR_VALUE;
10368         data_num_sum += reg_num + separator_num;
10369
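        /* Ring registers repeat per TQP at a stride of HCLGE_RING_REG_OFFSET;
         * dump each queue's copy followed by separator words.
         */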
10370         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10371         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10372         for (j = 0; j < kinfo->num_tqps; j++) {
10373                 for (i = 0; i < reg_num; i++)
10374                         *reg++ = hclge_read_dev(&hdev->hw,
10375                                                 ring_reg_addr_list[i] +
10376                                                 HCLGE_RING_REG_OFFSET * j);
10377                 for (i = 0; i < separator_num; i++)
10378                         *reg++ = SEPARATOR_VALUE;
10379         }
10380         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10381
10382         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10383         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10384         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10385                 for (i = 0; i < reg_num; i++)
10386                         *reg++ = hclge_read_dev(&hdev->hw,
10387                                                 tqp_intr_reg_addr_list[i] +
10388                                                 HCLGE_RING_INT_REG_OFFSET * j);
10389                 for (i = 0; i < separator_num; i++)
10390                         *reg++ = SEPARATOR_VALUE;
10391         }
10392         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10393
10394         return data_num_sum;
10395 }
10396
10397 static int hclge_get_regs_len(struct hnae3_handle *handle)
10398 {
10399         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10400         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10401         struct hclge_vport *vport = hclge_get_vport(handle);
10402         struct hclge_dev *hdev = vport->back;
10403         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10404         int regs_lines_32_bit, regs_lines_64_bit;
10405         int ret;
10406
10407         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10408         if (ret) {
10409                 dev_err(&hdev->pdev->dev,
10410                         "Get register number failed, ret = %d.\n", ret);
10411                 return ret;
10412         }
10413
10414         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10415         if (ret) {
10416                 dev_err(&hdev->pdev->dev,
10417                         "Get dfx reg len failed, ret = %d.\n", ret);
10418                 return ret;
10419         }
10420
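        /* Express every register group in REG_LEN_PER_LINE units, adding one
         * separator line per group, so the total matches hclge_get_regs().
         */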
10421         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10422                 REG_SEPARATOR_LINE;
10423         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10424                 REG_SEPARATOR_LINE;
10425         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10426                 REG_SEPARATOR_LINE;
10427         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10428                 REG_SEPARATOR_LINE;
10429         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10430                 REG_SEPARATOR_LINE;
10431         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10432                 REG_SEPARATOR_LINE;
10433
10434         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10435                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10436                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10437 }
10438
10439 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10440                            void *data)
10441 {
10442         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10443         struct hclge_vport *vport = hclge_get_vport(handle);
10444         struct hclge_dev *hdev = vport->back;
10445         u32 regs_num_32_bit, regs_num_64_bit;
10446         int i, reg_num, separator_num, ret;
10447         u32 *reg = data;
10448
10449         *version = hdev->fw_version;
10450
10451         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10452         if (ret) {
10453                 dev_err(&hdev->pdev->dev,
10454                         "Get register number failed, ret = %d.\n", ret);
10455                 return;
10456         }
10457
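        /* Fill the dump in the same order hclge_get_regs_len() accounts for:
         * PCIe registers, 32-bit registers, 64-bit registers, then the DFX
         * registers, each group padded with separator words.
         */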
10458         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10459
10460         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10461         if (ret) {
10462                 dev_err(&hdev->pdev->dev,
10463                         "Get 32 bit register failed, ret = %d.\n", ret);
10464                 return;
10465         }
10466         reg_num = regs_num_32_bit;
10467         reg += reg_num;
10468         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10469         for (i = 0; i < separator_num; i++)
10470                 *reg++ = SEPARATOR_VALUE;
10471
10472         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10473         if (ret) {
10474                 dev_err(&hdev->pdev->dev,
10475                         "Get 64 bit register failed, ret = %d.\n", ret);
10476                 return;
10477         }
10478         reg_num = regs_num_64_bit * 2;
10479         reg += reg_num;
10480         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10481         for (i = 0; i < separator_num; i++)
10482                 *reg++ = SEPARATOR_VALUE;
10483
10484         ret = hclge_get_dfx_reg(hdev, reg);
10485         if (ret)
10486                 dev_err(&hdev->pdev->dev,
10487                         "Get dfx register failed, ret = %d.\n", ret);
10488 }
10489
10490 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10491 {
10492         struct hclge_set_led_state_cmd *req;
10493         struct hclge_desc desc;
10494         int ret;
10495
10496         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10497
10498         req = (struct hclge_set_led_state_cmd *)desc.data;
10499         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10500                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10501
10502         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10503         if (ret)
10504                 dev_err(&hdev->pdev->dev,
10505                         "Send set led state cmd error, ret =%d\n", ret);
10506
10507         return ret;
10508 }
10509
10510 enum hclge_led_status {
10511         HCLGE_LED_OFF,
10512         HCLGE_LED_ON,
10513         HCLGE_LED_NO_CHANGE = 0xFF,
10514 };
10515
10516 static int hclge_set_led_id(struct hnae3_handle *handle,
10517                             enum ethtool_phys_id_state status)
10518 {
10519         struct hclge_vport *vport = hclge_get_vport(handle);
10520         struct hclge_dev *hdev = vport->back;
10521
10522         switch (status) {
10523         case ETHTOOL_ID_ACTIVE:
10524                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10525         case ETHTOOL_ID_INACTIVE:
10526                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10527         default:
10528                 return -EINVAL;
10529         }
10530 }
10531
10532 static void hclge_get_link_mode(struct hnae3_handle *handle,
10533                                 unsigned long *supported,
10534                                 unsigned long *advertising)
10535 {
10536         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10537         struct hclge_vport *vport = hclge_get_vport(handle);
10538         struct hclge_dev *hdev = vport->back;
10539         unsigned int idx = 0;
10540
10541         for (; idx < size; idx++) {
10542                 supported[idx] = hdev->hw.mac.supported[idx];
10543                 advertising[idx] = hdev->hw.mac.advertising[idx];
10544         }
10545 }
10546
10547 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10548 {
10549         struct hclge_vport *vport = hclge_get_vport(handle);
10550         struct hclge_dev *hdev = vport->back;
10551
10552         return hclge_config_gro(hdev, enable);
10553 }
10554
10555 static const struct hnae3_ae_ops hclge_ops = {
10556         .init_ae_dev = hclge_init_ae_dev,
10557         .uninit_ae_dev = hclge_uninit_ae_dev,
10558         .flr_prepare = hclge_flr_prepare,
10559         .flr_done = hclge_flr_done,
10560         .init_client_instance = hclge_init_client_instance,
10561         .uninit_client_instance = hclge_uninit_client_instance,
10562         .map_ring_to_vector = hclge_map_ring_to_vector,
10563         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10564         .get_vector = hclge_get_vector,
10565         .put_vector = hclge_put_vector,
10566         .set_promisc_mode = hclge_set_promisc_mode,
10567         .set_loopback = hclge_set_loopback,
10568         .start = hclge_ae_start,
10569         .stop = hclge_ae_stop,
10570         .client_start = hclge_client_start,
10571         .client_stop = hclge_client_stop,
10572         .get_status = hclge_get_status,
10573         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10574         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10575         .get_media_type = hclge_get_media_type,
10576         .check_port_speed = hclge_check_port_speed,
10577         .get_fec = hclge_get_fec,
10578         .set_fec = hclge_set_fec,
10579         .get_rss_key_size = hclge_get_rss_key_size,
10580         .get_rss_indir_size = hclge_get_rss_indir_size,
10581         .get_rss = hclge_get_rss,
10582         .set_rss = hclge_set_rss,
10583         .set_rss_tuple = hclge_set_rss_tuple,
10584         .get_rss_tuple = hclge_get_rss_tuple,
10585         .get_tc_size = hclge_get_tc_size,
10586         .get_mac_addr = hclge_get_mac_addr,
10587         .set_mac_addr = hclge_set_mac_addr,
10588         .do_ioctl = hclge_do_ioctl,
10589         .add_uc_addr = hclge_add_uc_addr,
10590         .rm_uc_addr = hclge_rm_uc_addr,
10591         .add_mc_addr = hclge_add_mc_addr,
10592         .rm_mc_addr = hclge_rm_mc_addr,
10593         .set_autoneg = hclge_set_autoneg,
10594         .get_autoneg = hclge_get_autoneg,
10595         .restart_autoneg = hclge_restart_autoneg,
10596         .halt_autoneg = hclge_halt_autoneg,
10597         .get_pauseparam = hclge_get_pauseparam,
10598         .set_pauseparam = hclge_set_pauseparam,
10599         .set_mtu = hclge_set_mtu,
10600         .reset_queue = hclge_reset_tqp,
10601         .get_stats = hclge_get_stats,
10602         .get_mac_stats = hclge_get_mac_stat,
10603         .update_stats = hclge_update_stats,
10604         .get_strings = hclge_get_strings,
10605         .get_sset_count = hclge_get_sset_count,
10606         .get_fw_version = hclge_get_fw_version,
10607         .get_mdix_mode = hclge_get_mdix_mode,
10608         .enable_vlan_filter = hclge_enable_vlan_filter,
10609         .set_vlan_filter = hclge_set_vlan_filter,
10610         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10611         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10612         .reset_event = hclge_reset_event,
10613         .get_reset_level = hclge_get_reset_level,
10614         .set_default_reset_request = hclge_set_def_reset_request,
10615         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10616         .set_channels = hclge_set_channels,
10617         .get_channels = hclge_get_channels,
10618         .get_regs_len = hclge_get_regs_len,
10619         .get_regs = hclge_get_regs,
10620         .set_led_id = hclge_set_led_id,
10621         .get_link_mode = hclge_get_link_mode,
10622         .add_fd_entry = hclge_add_fd_entry,
10623         .del_fd_entry = hclge_del_fd_entry,
10624         .del_all_fd_entries = hclge_del_all_fd_entries,
10625         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10626         .get_fd_rule_info = hclge_get_fd_rule_info,
10627         .get_fd_all_rules = hclge_get_all_rules,
10628         .restore_fd_rules = hclge_restore_fd_entries,
10629         .enable_fd = hclge_enable_fd,
10630         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10631         .dbg_run_cmd = hclge_dbg_run_cmd,
10632         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10633         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10634         .ae_dev_resetting = hclge_ae_dev_resetting,
10635         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10636         .set_gro_en = hclge_gro_en,
10637         .get_global_queue_id = hclge_covert_handle_qid_global,
10638         .set_timer_task = hclge_set_timer_task,
10639         .mac_connect_phy = hclge_mac_connect_phy,
10640         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10641         .restore_vlan_table = hclge_restore_vlan_table,
10642         .get_vf_config = hclge_get_vf_config,
10643         .set_vf_link_state = hclge_set_vf_link_state,
10644         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10645         .set_vf_trust = hclge_set_vf_trust,
10646         .set_vf_rate = hclge_set_vf_rate,
10647         .set_vf_mac = hclge_set_vf_mac,
10648 };
10649
10650 static struct hnae3_ae_algo ae_algo = {
10651         .ops = &hclge_ops,
10652         .pdev_id_table = ae_algo_pci_tbl,
10653 };
10654
10655 static int hclge_init(void)
10656 {
10657         pr_info("%s is initializing\n", HCLGE_NAME);
10658
10659         hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10660         if (!hclge_wq) {
10661                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10662                 return -ENOMEM;
10663         }
10664
10665         hnae3_register_ae_algo(&ae_algo);
10666
10667         return 0;
10668 }
10669
10670 static void hclge_exit(void)
10671 {
10672         hnae3_unregister_ae_algo(&ae_algo);
10673         destroy_workqueue(hclge_wq);
10674 }
10675 module_init(hclge_init);
10676 module_exit(hclge_exit);
10677
10678 MODULE_LICENSE("GPL");
10679 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10680 MODULE_DESCRIPTION("HCLGE Driver");
10681 MODULE_VERSION(HCLGE_MOD_VERSION);