net: hns3: add support for VF modify VLAN filter state
linux-2.6-microblaze.git: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26
27 #define HCLGE_NAME                      "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30
31 #define HCLGE_BUF_SIZE_UNIT     256U
32 #define HCLGE_BUF_MUL_BY        2
33 #define HCLGE_BUF_DIV_BY        2
34 #define NEED_RESERVE_TC_NUM     2
35 #define BUF_MAX_PERCENT         100
36 #define BUF_RESERVE_PERCENT     90
37
38 #define HCLGE_RESET_MAX_FAIL_CNT        5
39 #define HCLGE_RESET_SYNC_TIME           100
40 #define HCLGE_PF_RESET_SYNC_TIME        20
41 #define HCLGE_PF_RESET_SYNC_CNT         1500
42
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56
57 #define HCLGE_LINK_STATUS_MS    10
58
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67                                                    unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88         /* required last entry */
89         {0, }
90 };
91
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95                                          HCLGE_CMDQ_TX_ADDR_H_REG,
96                                          HCLGE_CMDQ_TX_DEPTH_REG,
97                                          HCLGE_CMDQ_TX_TAIL_REG,
98                                          HCLGE_CMDQ_TX_HEAD_REG,
99                                          HCLGE_CMDQ_RX_ADDR_L_REG,
100                                          HCLGE_CMDQ_RX_ADDR_H_REG,
101                                          HCLGE_CMDQ_RX_DEPTH_REG,
102                                          HCLGE_CMDQ_RX_TAIL_REG,
103                                          HCLGE_CMDQ_RX_HEAD_REG,
104                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
105                                          HCLGE_CMDQ_INTR_STS_REG,
106                                          HCLGE_CMDQ_INTR_EN_REG,
107                                          HCLGE_CMDQ_INTR_GEN_REG};
108
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110                                            HCLGE_VECTOR0_OTER_EN_REG,
111                                            HCLGE_MISC_RESET_STS_REG,
112                                            HCLGE_MISC_VECTOR_INT_STS,
113                                            HCLGE_GLOBAL_RESET_REG,
114                                            HCLGE_FUN_RST_ING,
115                                            HCLGE_GRO_EN_REG};
116
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118                                          HCLGE_RING_RX_ADDR_H_REG,
119                                          HCLGE_RING_RX_BD_NUM_REG,
120                                          HCLGE_RING_RX_BD_LENGTH_REG,
121                                          HCLGE_RING_RX_MERGE_EN_REG,
122                                          HCLGE_RING_RX_TAIL_REG,
123                                          HCLGE_RING_RX_HEAD_REG,
124                                          HCLGE_RING_RX_FBD_NUM_REG,
125                                          HCLGE_RING_RX_OFFSET_REG,
126                                          HCLGE_RING_RX_FBD_OFFSET_REG,
127                                          HCLGE_RING_RX_STASH_REG,
128                                          HCLGE_RING_RX_BD_ERR_REG,
129                                          HCLGE_RING_TX_ADDR_L_REG,
130                                          HCLGE_RING_TX_ADDR_H_REG,
131                                          HCLGE_RING_TX_BD_NUM_REG,
132                                          HCLGE_RING_TX_PRIORITY_REG,
133                                          HCLGE_RING_TX_TC_REG,
134                                          HCLGE_RING_TX_MERGE_EN_REG,
135                                          HCLGE_RING_TX_TAIL_REG,
136                                          HCLGE_RING_TX_HEAD_REG,
137                                          HCLGE_RING_TX_FBD_NUM_REG,
138                                          HCLGE_RING_TX_OFFSET_REG,
139                                          HCLGE_RING_TX_EBD_NUM_REG,
140                                          HCLGE_RING_TX_EBD_OFFSET_REG,
141                                          HCLGE_RING_TX_BD_ERR_REG,
142                                          HCLGE_RING_EN_REG};
143
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145                                              HCLGE_TQP_INTR_GL0_REG,
146                                              HCLGE_TQP_INTR_GL1_REG,
147                                              HCLGE_TQP_INTR_GL2_REG,
148                                              HCLGE_TQP_INTR_RL_REG};
149
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151         "App    Loopback test",
152         "Serdes serial Loopback test",
153         "Serdes parallel Loopback test",
154         "Phy    Loopback test"
155 };
156
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158         {"mac_tx_mac_pause_num",
159                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160         {"mac_rx_mac_pause_num",
161                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162         {"mac_tx_control_pkt_num",
163                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164         {"mac_rx_control_pkt_num",
165                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166         {"mac_tx_pfc_pkt_num",
167                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168         {"mac_tx_pfc_pri0_pkt_num",
169                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170         {"mac_tx_pfc_pri1_pkt_num",
171                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172         {"mac_tx_pfc_pri2_pkt_num",
173                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174         {"mac_tx_pfc_pri3_pkt_num",
175                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176         {"mac_tx_pfc_pri4_pkt_num",
177                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178         {"mac_tx_pfc_pri5_pkt_num",
179                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180         {"mac_tx_pfc_pri6_pkt_num",
181                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182         {"mac_tx_pfc_pri7_pkt_num",
183                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184         {"mac_rx_pfc_pkt_num",
185                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186         {"mac_rx_pfc_pri0_pkt_num",
187                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188         {"mac_rx_pfc_pri1_pkt_num",
189                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190         {"mac_rx_pfc_pri2_pkt_num",
191                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192         {"mac_rx_pfc_pri3_pkt_num",
193                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194         {"mac_rx_pfc_pri4_pkt_num",
195                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196         {"mac_rx_pfc_pri5_pkt_num",
197                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198         {"mac_rx_pfc_pri6_pkt_num",
199                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200         {"mac_rx_pfc_pri7_pkt_num",
201                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202         {"mac_tx_total_pkt_num",
203                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204         {"mac_tx_total_oct_num",
205                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206         {"mac_tx_good_pkt_num",
207                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208         {"mac_tx_bad_pkt_num",
209                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210         {"mac_tx_good_oct_num",
211                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212         {"mac_tx_bad_oct_num",
213                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214         {"mac_tx_uni_pkt_num",
215                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216         {"mac_tx_multi_pkt_num",
217                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218         {"mac_tx_broad_pkt_num",
219                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220         {"mac_tx_undersize_pkt_num",
221                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222         {"mac_tx_oversize_pkt_num",
223                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224         {"mac_tx_64_oct_pkt_num",
225                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226         {"mac_tx_65_127_oct_pkt_num",
227                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228         {"mac_tx_128_255_oct_pkt_num",
229                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230         {"mac_tx_256_511_oct_pkt_num",
231                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232         {"mac_tx_512_1023_oct_pkt_num",
233                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234         {"mac_tx_1024_1518_oct_pkt_num",
235                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236         {"mac_tx_1519_2047_oct_pkt_num",
237                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238         {"mac_tx_2048_4095_oct_pkt_num",
239                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240         {"mac_tx_4096_8191_oct_pkt_num",
241                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242         {"mac_tx_8192_9216_oct_pkt_num",
243                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244         {"mac_tx_9217_12287_oct_pkt_num",
245                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246         {"mac_tx_12288_16383_oct_pkt_num",
247                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248         {"mac_tx_1519_max_good_pkt_num",
249                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250         {"mac_tx_1519_max_bad_pkt_num",
251                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252         {"mac_rx_total_pkt_num",
253                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254         {"mac_rx_total_oct_num",
255                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256         {"mac_rx_good_pkt_num",
257                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258         {"mac_rx_bad_pkt_num",
259                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260         {"mac_rx_good_oct_num",
261                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262         {"mac_rx_bad_oct_num",
263                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264         {"mac_rx_uni_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266         {"mac_rx_multi_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268         {"mac_rx_broad_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270         {"mac_rx_undersize_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272         {"mac_rx_oversize_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274         {"mac_rx_64_oct_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276         {"mac_rx_65_127_oct_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278         {"mac_rx_128_255_oct_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280         {"mac_rx_256_511_oct_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282         {"mac_rx_512_1023_oct_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284         {"mac_rx_1024_1518_oct_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286         {"mac_rx_1519_2047_oct_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288         {"mac_rx_2048_4095_oct_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290         {"mac_rx_4096_8191_oct_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292         {"mac_rx_8192_9216_oct_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294         {"mac_rx_9217_12287_oct_pkt_num",
295                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296         {"mac_rx_12288_16383_oct_pkt_num",
297                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298         {"mac_rx_1519_max_good_pkt_num",
299                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300         {"mac_rx_1519_max_bad_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302
303         {"mac_tx_fragment_pkt_num",
304                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305         {"mac_tx_undermin_pkt_num",
306                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307         {"mac_tx_jabber_pkt_num",
308                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309         {"mac_tx_err_all_pkt_num",
310                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311         {"mac_tx_from_app_good_pkt_num",
312                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313         {"mac_tx_from_app_bad_pkt_num",
314                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315         {"mac_rx_fragment_pkt_num",
316                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317         {"mac_rx_undermin_pkt_num",
318                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319         {"mac_rx_jabber_pkt_num",
320                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321         {"mac_rx_fcs_err_pkt_num",
322                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323         {"mac_rx_send_app_good_pkt_num",
324                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325         {"mac_rx_send_app_bad_pkt_num",
326                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330         {
331                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334                 .i_port_bitmap = 0x1,
335         },
336 };
337
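/* Default RSS hash key */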
338 static const u8 hclge_hash_key[] = {
339         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345
346 static const u32 hclge_dfx_bd_offset_list[] = {
347         HCLGE_DFX_BIOS_BD_OFFSET,
348         HCLGE_DFX_SSU_0_BD_OFFSET,
349         HCLGE_DFX_SSU_1_BD_OFFSET,
350         HCLGE_DFX_IGU_BD_OFFSET,
351         HCLGE_DFX_RPU_0_BD_OFFSET,
352         HCLGE_DFX_RPU_1_BD_OFFSET,
353         HCLGE_DFX_NCSI_BD_OFFSET,
354         HCLGE_DFX_RTC_BD_OFFSET,
355         HCLGE_DFX_PPP_BD_OFFSET,
356         HCLGE_DFX_RCB_BD_OFFSET,
357         HCLGE_DFX_TQP_BD_OFFSET,
358         HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362         HCLGE_OPC_DFX_BIOS_COMMON_REG,
363         HCLGE_OPC_DFX_SSU_REG_0,
364         HCLGE_OPC_DFX_SSU_REG_1,
365         HCLGE_OPC_DFX_IGU_EGU_REG,
366         HCLGE_OPC_DFX_RPU_REG_0,
367         HCLGE_OPC_DFX_RPU_REG_1,
368         HCLGE_OPC_DFX_NCSI_REG,
369         HCLGE_OPC_DFX_RTC_REG,
370         HCLGE_OPC_DFX_PPP_REG,
371         HCLGE_OPC_DFX_RCB_REG,
372         HCLGE_OPC_DFX_TQP_REG,
373         HCLGE_OPC_DFX_SSU_REG_2
374 };
375
376 static const struct key_info meta_data_key_info[] = {
377         { PACKET_TYPE_ID, 6},
378         { IP_FRAGEMENT, 1},
379         { ROCE_TYPE, 1},
380         { NEXT_KEY, 5},
381         { VLAN_NUMBER, 2},
382         { SRC_VPORT, 12},
383         { DST_VPORT, 12},
384         { TUNNEL_PACKET, 1},
385 };
386
387 static const struct key_info tuple_key_info[] = {
388         { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389         { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390         { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391         { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392         { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393         { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394         { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395         { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396         { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397         { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398         { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399         { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400         { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401         { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402         { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403         { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404         { INNER_DST_MAC, 48, KEY_OPT_MAC,
405           offsetof(struct hclge_fd_rule, tuples.dst_mac),
406           offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407         { INNER_SRC_MAC, 48, KEY_OPT_MAC,
408           offsetof(struct hclge_fd_rule, tuples.src_mac),
409           offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410         { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411           offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412           offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413         { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414         { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415           offsetof(struct hclge_fd_rule, tuples.ether_proto),
416           offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417         { INNER_L2_RSV, 16, KEY_OPT_LE16,
418           offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419           offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420         { INNER_IP_TOS, 8, KEY_OPT_U8,
421           offsetof(struct hclge_fd_rule, tuples.ip_tos),
422           offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423         { INNER_IP_PROTO, 8, KEY_OPT_U8,
424           offsetof(struct hclge_fd_rule, tuples.ip_proto),
425           offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426         { INNER_SRC_IP, 32, KEY_OPT_IP,
427           offsetof(struct hclge_fd_rule, tuples.src_ip),
428           offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429         { INNER_DST_IP, 32, KEY_OPT_IP,
430           offsetof(struct hclge_fd_rule, tuples.dst_ip),
431           offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432         { INNER_L3_RSV, 16, KEY_OPT_LE16,
433           offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434           offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435         { INNER_SRC_PORT, 16, KEY_OPT_LE16,
436           offsetof(struct hclge_fd_rule, tuples.src_port),
437           offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438         { INNER_DST_PORT, 16, KEY_OPT_LE16,
439           offsetof(struct hclge_fd_rule, tuples.dst_port),
440           offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441         { INNER_L4_RSV, 32, KEY_OPT_LE32,
442           offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443           offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445
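/* Read MAC statistics using a fixed number of command descriptors; used as
 * a fallback when the firmware cannot report the statistics register number.
 */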
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449
450         u64 *data = (u64 *)(&hdev->mac_stats);
451         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452         __le64 *desc_data;
453         int i, k, n;
454         int ret;
455
456         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458         if (ret) {
459                 dev_err(&hdev->pdev->dev,
460                         "Get MAC pkt stats fail, status = %d.\n", ret);
461
462                 return ret;
463         }
464
465         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466                 /* for special opcode 0032, only the first desc has the head */
467                 if (unlikely(i == 0)) {
468                         desc_data = (__le64 *)(&desc[i].data[0]);
469                         n = HCLGE_RD_FIRST_STATS_NUM;
470                 } else {
471                         desc_data = (__le64 *)(&desc[i]);
472                         n = HCLGE_RD_OTHER_STATS_NUM;
473                 }
474
475                 for (k = 0; k < n; k++) {
476                         *data += le64_to_cpu(*desc_data);
477                         data++;
478                         desc_data++;
479                 }
480         }
481
482         return 0;
483 }
484
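/* Read all MAC statistics using a descriptor count previously queried from
 * the firmware (HCLGE_OPC_STATS_MAC_ALL).
 */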
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487         u64 *data = (u64 *)(&hdev->mac_stats);
488         struct hclge_desc *desc;
489         __le64 *desc_data;
490         u16 i, k, n;
491         int ret;
492
493         /* This may be called inside atomic sections,
494          * so GFP_ATOMIC is more suitable here
495          */
496         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497         if (!desc)
498                 return -ENOMEM;
499
500         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502         if (ret) {
503                 kfree(desc);
504                 return ret;
505         }
506
507         for (i = 0; i < desc_num; i++) {
508                 /* for special opcode 0034, only the first desc has the head */
509                 if (i == 0) {
510                         desc_data = (__le64 *)(&desc[i].data[0]);
511                         n = HCLGE_RD_FIRST_STATS_NUM;
512                 } else {
513                         desc_data = (__le64 *)(&desc[i]);
514                         n = HCLGE_RD_OTHER_STATS_NUM;
515                 }
516
517                 for (k = 0; k < n; k++) {
518                         *data += le64_to_cpu(*desc_data);
519                         data++;
520                         desc_data++;
521                 }
522         }
523
524         kfree(desc);
525
526         return 0;
527 }
528
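/* Query how many MAC statistics registers the firmware reports and convert
 * that into the number of command descriptors needed to read them all.
 */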
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531         struct hclge_desc desc;
532         __le32 *desc_data;
533         u32 reg_num;
534         int ret;
535
536         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538         if (ret)
539                 return ret;
540
541         desc_data = (__le32 *)(&desc.data[0]);
542         reg_num = le32_to_cpu(*desc_data);
543
544         *desc_num = 1 + ((reg_num - 3) >> 2) +
545                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546
547         return 0;
548 }
549
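/* Update MAC statistics, preferring the complete method when the firmware
 * can report the register number and falling back to the defective one
 * otherwise.
 */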
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552         u32 desc_num;
553         int ret;
554
555         ret = hclge_mac_query_reg_num(hdev, &desc_num);
556         /* The firmware supports the new statistics acquisition method */
557         if (!ret)
558                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
559         else if (ret == -EOPNOTSUPP)
560                 ret = hclge_mac_update_stats_defective(hdev);
561         else
562                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563
564         return ret;
565 }
566
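/* Query the RX and TX packet counters of every TQP from the firmware and
 * accumulate them into the per-queue statistics.
 */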
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570         struct hclge_vport *vport = hclge_get_vport(handle);
571         struct hclge_dev *hdev = vport->back;
572         struct hnae3_queue *queue;
573         struct hclge_desc desc[1];
574         struct hclge_tqp *tqp;
575         int ret, i;
576
577         for (i = 0; i < kinfo->num_tqps; i++) {
578                 queue = handle->kinfo.tqp[i];
579                 tqp = container_of(queue, struct hclge_tqp, q);
580                 /* command : HCLGE_OPC_QUERY_RX_STATS */
581                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582                                            true);
583
584                 desc[0].data[0] = cpu_to_le32(tqp->index);
585                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
586                 if (ret) {
587                         dev_err(&hdev->pdev->dev,
588                                 "Query tqp stat fail, status = %d, queue = %d\n",
589                                 ret, i);
590                         return ret;
591                 }
592                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593                         le32_to_cpu(desc[0].data[1]);
594         }
595
596         for (i = 0; i < kinfo->num_tqps; i++) {
597                 queue = handle->kinfo.tqp[i];
598                 tqp = container_of(queue, struct hclge_tqp, q);
599                 /* command : HCLGE_OPC_QUERY_TX_STATS */
600                 hclge_cmd_setup_basic_desc(&desc[0],
601                                            HCLGE_OPC_QUERY_TX_STATS,
602                                            true);
603
604                 desc[0].data[0] = cpu_to_le32(tqp->index);
605                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
606                 if (ret) {
607                         dev_err(&hdev->pdev->dev,
608                                 "Query tqp stat fail, status = %d, queue = %d\n",
609                                 ret, i);
610                         return ret;
611                 }
612                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613                         le32_to_cpu(desc[0].data[1]);
614         }
615
616         return 0;
617 }
618
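/* Copy the accumulated per-queue TX and RX packet counters into the ethtool
 * statistics buffer and return the next free position.
 */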
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         struct hclge_tqp *tqp;
623         u64 *buff = data;
624         int i;
625
626         for (i = 0; i < kinfo->num_tqps; i++) {
627                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629         }
630
631         for (i = 0; i < kinfo->num_tqps; i++) {
632                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634         }
635
636         return buff;
637 }
638
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642
643         /* each tqp has one TX queue and one RX queue */
644         return kinfo->num_tqps * 2;
645 }
646
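/* Fill in the ethtool string table with the per-queue TX/RX counter names */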
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650         u8 *buff = data;
651         int i;
652
653         for (i = 0; i < kinfo->num_tqps; i++) {
654                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655                         struct hclge_tqp, q);
656                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657                          tqp->index);
658                 buff = buff + ETH_GSTRING_LEN;
659         }
660
661         for (i = 0; i < kinfo->num_tqps; i++) {
662                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663                         struct hclge_tqp, q);
664                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665                          tqp->index);
666                 buff = buff + ETH_GSTRING_LEN;
667         }
668
669         return buff;
670 }
671
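/* Copy the statistics described by strs[] from comm_stats into the ethtool
 * buffer and return the next free position.
 */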
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673                                  const struct hclge_comm_stats_str strs[],
674                                  int size, u64 *data)
675 {
676         u64 *buf = data;
677         u32 i;
678
679         for (i = 0; i < size; i++)
680                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681
682         return buf + size;
683 }
684
685 static u8 *hclge_comm_get_strings(u32 stringset,
686                                   const struct hclge_comm_stats_str strs[],
687                                   int size, u8 *data)
688 {
689         char *buff = (char *)data;
690         u32 i;
691
692         if (stringset != ETH_SS_STATS)
693                 return buff;
694
695         for (i = 0; i < size; i++) {
696                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697                 buff = buff + ETH_GSTRING_LEN;
698         }
699
700         return (u8 *)buff;
701 }
702
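/* Update TQP statistics for the PF's nic handle (if a client is attached)
 * as well as the MAC statistics.
 */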
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705         struct hnae3_handle *handle;
706         int status;
707
708         handle = &hdev->vport[0].nic;
709         if (handle->client) {
710                 status = hclge_tqps_update_stats(handle);
711                 if (status) {
712                         dev_err(&hdev->pdev->dev,
713                                 "Update TQPS stats fail, status = %d.\n",
714                                 status);
715                 }
716         }
717
718         status = hclge_mac_update_stats(hdev);
719         if (status)
720                 dev_err(&hdev->pdev->dev,
721                         "Update MAC stats fail, status = %d.\n", status);
722 }
723
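/* Update MAC and TQP statistics; the STATISTICS_UPDATING state bit prevents
 * concurrent updates.
 */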
724 static void hclge_update_stats(struct hnae3_handle *handle,
725                                struct net_device_stats *net_stats)
726 {
727         struct hclge_vport *vport = hclge_get_vport(handle);
728         struct hclge_dev *hdev = vport->back;
729         int status;
730
731         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732                 return;
733
734         status = hclge_mac_update_stats(hdev);
735         if (status)
736                 dev_err(&hdev->pdev->dev,
737                         "Update MAC stats fail, status = %d.\n",
738                         status);
739
740         status = hclge_tqps_update_stats(handle);
741         if (status)
742                 dev_err(&hdev->pdev->dev,
743                         "Update TQPS stats fail, status = %d.\n",
744                         status);
745
746         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748
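/* Return the number of self-test items (ETH_SS_TEST) or statistics strings
 * (ETH_SS_STATS), updating the supported loopback flags in the process.
 */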
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752                 HNAE3_SUPPORT_PHY_LOOPBACK |\
753                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755
756         struct hclge_vport *vport = hclge_get_vport(handle);
757         struct hclge_dev *hdev = vport->back;
758         int count = 0;
759
760         /* Loopback test support rules:
761          * mac: only GE mode is supported
762          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
763          * phy: only supported when a phy device exists on the board
764          */
765         if (stringset == ETH_SS_TEST) {
766                 /* clear the loopback bit flags first */
767                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772                         count += 1;
773                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774                 }
775
776                 count += 2;
777                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779
780                 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781                      hdev->hw.mac.phydev->drv->set_loopback) ||
782                     hnae3_dev_phy_imp_supported(hdev)) {
783                         count += 1;
784                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785                 }
786         } else if (stringset == ETH_SS_STATS) {
787                 count = ARRAY_SIZE(g_mac_stats_string) +
788                         hclge_tqps_get_sset_count(handle, stringset);
789         }
790
791         return count;
792 }
793
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795                               u8 *data)
796 {
797         u8 *p = (char *)data;
798         int size;
799
800         if (stringset == ETH_SS_STATS) {
801                 size = ARRAY_SIZE(g_mac_stats_string);
802                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803                                            size, p);
804                 p = hclge_tqps_get_strings(handle, p);
805         } else if (stringset == ETH_SS_TEST) {
806                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808                                ETH_GSTRING_LEN);
809                         p += ETH_GSTRING_LEN;
810                 }
811                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813                                ETH_GSTRING_LEN);
814                         p += ETH_GSTRING_LEN;
815                 }
816                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817                         memcpy(p,
818                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819                                ETH_GSTRING_LEN);
820                         p += ETH_GSTRING_LEN;
821                 }
822                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824                                ETH_GSTRING_LEN);
825                         p += ETH_GSTRING_LEN;
826                 }
827         }
828 }
829
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832         struct hclge_vport *vport = hclge_get_vport(handle);
833         struct hclge_dev *hdev = vport->back;
834         u64 *p;
835
836         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837                                  ARRAY_SIZE(g_mac_stats_string), data);
838         p = hclge_tqps_get_stats(handle, p);
839 }
840
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842                                struct hns3_mac_stats *mac_stats)
843 {
844         struct hclge_vport *vport = hclge_get_vport(handle);
845         struct hclge_dev *hdev = vport->back;
846
847         hclge_update_stats(handle, NULL);
848
849         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852
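/* Parse the function status response: record whether this PF is the main PF
 * and extract the MAC id.
 */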
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854                                    struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK       0xF
857
858         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859                 return -EINVAL;
860
861         /* Record whether this pf is the main pf */
862         if (status->pf_state & HCLGE_PF_STATE_MAIN)
863                 hdev->flag |= HCLGE_FLAG_MAIN;
864         else
865                 hdev->flag &= ~HCLGE_FLAG_MAIN;
866
867         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868         return 0;
869 }
870
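/* Poll the firmware until the PF state is reported (or the retry limit is
 * reached), then parse the function status.
 */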
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT     5
874
875         struct hclge_func_status_cmd *req;
876         struct hclge_desc desc;
877         int timeout = 0;
878         int ret;
879
880         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881         req = (struct hclge_func_status_cmd *)desc.data;
882
883         do {
884                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885                 if (ret) {
886                         dev_err(&hdev->pdev->dev,
887                                 "query function status failed %d.\n", ret);
888                         return ret;
889                 }
890
891                 /* Check pf reset is done */
892                 if (req->pf_state)
893                         break;
894                 usleep_range(1000, 2000);
895         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
896
897         return hclge_parse_func_status(hdev, req);
898 }
899
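/* Query the TQP number, buffer sizes and MSI-X vector resources assigned to
 * this PF by the firmware.
 */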
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902         struct hclge_pf_res_cmd *req;
903         struct hclge_desc desc;
904         int ret;
905
906         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908         if (ret) {
909                 dev_err(&hdev->pdev->dev,
910                         "query pf resource failed %d.\n", ret);
911                 return ret;
912         }
913
914         req = (struct hclge_pf_res_cmd *)desc.data;
915         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916                          le16_to_cpu(req->ext_tqp_num);
917         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918
919         if (req->tx_buf_size)
920                 hdev->tx_buf_size =
921                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922         else
923                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924
925         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926
927         if (req->dv_buf_size)
928                 hdev->dv_buf_size =
929                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930         else
931                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932
933         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934
935         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937                 dev_err(&hdev->pdev->dev,
938                         "only %u msi resources available, not enough for pf(min:2).\n",
939                         hdev->num_nic_msi);
940                 return -EINVAL;
941         }
942
943         if (hnae3_dev_roce_supported(hdev)) {
944                 hdev->num_roce_msi =
945                         le16_to_cpu(req->pf_intr_vector_number_roce);
946
947                 /* PF should have NIC vectors and RoCE vectors,
948                  * NIC vectors are queued before RoCE vectors.
949                  */
950                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951         } else {
952                 hdev->num_msi = hdev->num_nic_msi;
953         }
954
955         return 0;
956 }
957
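/* Translate the firmware speed code into the corresponding
 * HCLGE_MAC_SPEED_* value.
 */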
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
960         switch (speed_cmd) {
961         case 6:
962                 *speed = HCLGE_MAC_SPEED_10M;
963                 break;
964         case 7:
965                 *speed = HCLGE_MAC_SPEED_100M;
966                 break;
967         case 0:
968                 *speed = HCLGE_MAC_SPEED_1G;
969                 break;
970         case 1:
971                 *speed = HCLGE_MAC_SPEED_10G;
972                 break;
973         case 2:
974                 *speed = HCLGE_MAC_SPEED_25G;
975                 break;
976         case 3:
977                 *speed = HCLGE_MAC_SPEED_40G;
978                 break;
979         case 4:
980                 *speed = HCLGE_MAC_SPEED_50G;
981                 break;
982         case 5:
983                 *speed = HCLGE_MAC_SPEED_100G;
984                 break;
985         case 8:
986                 *speed = HCLGE_MAC_SPEED_200G;
987                 break;
988         default:
989                 return -EINVAL;
990         }
991
992         return 0;
993 }
994
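/* Check whether the requested speed is advertised in the MAC's speed
 * ability; return -EINVAL if it is not.
 */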
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997         struct hclge_vport *vport = hclge_get_vport(handle);
998         struct hclge_dev *hdev = vport->back;
999         u32 speed_ability = hdev->hw.mac.speed_ability;
1000         u32 speed_bit = 0;
1001
1002         switch (speed) {
1003         case HCLGE_MAC_SPEED_10M:
1004                 speed_bit = HCLGE_SUPPORT_10M_BIT;
1005                 break;
1006         case HCLGE_MAC_SPEED_100M:
1007                 speed_bit = HCLGE_SUPPORT_100M_BIT;
1008                 break;
1009         case HCLGE_MAC_SPEED_1G:
1010                 speed_bit = HCLGE_SUPPORT_1G_BIT;
1011                 break;
1012         case HCLGE_MAC_SPEED_10G:
1013                 speed_bit = HCLGE_SUPPORT_10G_BIT;
1014                 break;
1015         case HCLGE_MAC_SPEED_25G:
1016                 speed_bit = HCLGE_SUPPORT_25G_BIT;
1017                 break;
1018         case HCLGE_MAC_SPEED_40G:
1019                 speed_bit = HCLGE_SUPPORT_40G_BIT;
1020                 break;
1021         case HCLGE_MAC_SPEED_50G:
1022                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1023                 break;
1024         case HCLGE_MAC_SPEED_100G:
1025                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1026                 break;
1027         case HCLGE_MAC_SPEED_200G:
1028                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1029                 break;
1030         default:
1031                 return -EINVAL;
1032         }
1033
1034         if (speed_bit & speed_ability)
1035                 return 0;
1036
1037         return -EINVAL;
1038 }
1039
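/* The hclge_convert_setting_* helpers below translate speed ability bits
 * into the matching SR/LR/CR/KR ethtool link modes in mac->supported.
 */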
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047                                  mac->supported);
1048         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059                                  mac->supported);
1060 }
1061
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066                                  mac->supported);
1067         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069                                  mac->supported);
1070         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072                                  mac->supported);
1073         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080                 linkmode_set_bit(
1081                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082                         mac->supported);
1083 }
1084
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089                                  mac->supported);
1090         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092                                  mac->supported);
1093         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104                                  mac->supported);
1105 }
1106
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111                                  mac->supported);
1112         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114                                  mac->supported);
1115         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117                                  mac->supported);
1118         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120                                  mac->supported);
1121         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123                                  mac->supported);
1124         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126                                  mac->supported);
1127         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129                                  mac->supported);
1130 }
1131
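/* Derive the supported FEC link modes and fec_ability bits from the current
 * MAC speed.
 */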
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136
1137         switch (mac->speed) {
1138         case HCLGE_MAC_SPEED_10G:
1139         case HCLGE_MAC_SPEED_40G:
1140                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141                                  mac->supported);
1142                 mac->fec_ability =
1143                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144                 break;
1145         case HCLGE_MAC_SPEED_25G:
1146         case HCLGE_MAC_SPEED_50G:
1147                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148                                  mac->supported);
1149                 mac->fec_ability =
1150                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151                         BIT(HNAE3_FEC_AUTO);
1152                 break;
1153         case HCLGE_MAC_SPEED_100G:
1154         case HCLGE_MAC_SPEED_200G:
1155                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157                 break;
1158         default:
1159                 mac->fec_ability = 0;
1160                 break;
1161         }
1162 }
1163
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165                                         u16 speed_ability)
1166 {
1167         struct hclge_mac *mac = &hdev->hw.mac;
1168
1169         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171                                  mac->supported);
1172
1173         hclge_convert_setting_sr(mac, speed_ability);
1174         hclge_convert_setting_lr(mac, speed_ability);
1175         hclge_convert_setting_cr(mac, speed_ability);
1176         if (hnae3_dev_fec_supported(hdev))
1177                 hclge_convert_setting_fec(mac);
1178
1179         if (hnae3_dev_pause_supported(hdev))
1180                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187                                             u16 speed_ability)
1188 {
1189         struct hclge_mac *mac = &hdev->hw.mac;
1190
1191         hclge_convert_setting_kr(mac, speed_ability);
1192         if (hnae3_dev_fec_supported(hdev))
1193                 hclge_convert_setting_fec(mac);
1194
1195         if (hnae3_dev_pause_supported(hdev))
1196                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197
1198         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203                                          u16 speed_ability)
1204 {
1205         unsigned long *supported = hdev->hw.mac.supported;
1206
1207         /* default to support all speeds for GE port */
1208         if (!speed_ability)
1209                 speed_ability = HCLGE_SUPPORT_GE;
1210
1211         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213                                  supported);
1214
1215         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217                                  supported);
1218                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219                                  supported);
1220         }
1221
1222         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225         }
1226
1227         if (hnae3_dev_pause_supported(hdev)) {
1228                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230         }
1231
1232         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235
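/* Populate the supported link modes according to the media type */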
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238         u8 media_type = hdev->hw.mac.media_type;
1239
1240         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243                 hclge_parse_copper_link_mode(hdev, speed_ability);
1244         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247
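/* Return the highest speed set in speed_ability (defaults to 1G) */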
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251                 return HCLGE_MAC_SPEED_200G;
1252
1253         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254                 return HCLGE_MAC_SPEED_100G;
1255
1256         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257                 return HCLGE_MAC_SPEED_50G;
1258
1259         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260                 return HCLGE_MAC_SPEED_40G;
1261
1262         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263                 return HCLGE_MAC_SPEED_25G;
1264
1265         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266                 return HCLGE_MAC_SPEED_10G;
1267
1268         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269                 return HCLGE_MAC_SPEED_1G;
1270
1271         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272                 return HCLGE_MAC_SPEED_100M;
1273
1274         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275                 return HCLGE_MAC_SPEED_10M;
1276
1277         return HCLGE_MAC_SPEED_1G;
1278 }
1279
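/* Parse the configuration parameters returned by the firmware into
 * struct hclge_cfg.
 */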
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT                 8
1283
1284         struct hclge_cfg_param_cmd *req;
1285         u64 mac_addr_tmp_high;
1286         u16 speed_ability_ext;
1287         u64 mac_addr_tmp;
1288         unsigned int i;
1289
1290         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291
1292         /* get the configuration */
1293         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296                                             HCLGE_CFG_TQP_DESC_N_M,
1297                                             HCLGE_CFG_TQP_DESC_N_S);
1298
1299         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300                                         HCLGE_CFG_PHY_ADDR_M,
1301                                         HCLGE_CFG_PHY_ADDR_S);
1302         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303                                           HCLGE_CFG_MEDIA_TP_M,
1304                                           HCLGE_CFG_MEDIA_TP_S);
1305         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306                                           HCLGE_CFG_RX_BUF_LEN_M,
1307                                           HCLGE_CFG_RX_BUF_LEN_S);
1308         /* get mac_address */
1309         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311                                             HCLGE_CFG_MAC_ADDR_H_M,
1312                                             HCLGE_CFG_MAC_ADDR_H_S);
1313
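             /* The MAC address arrives split across two config words: the low
              * 32 bits in param[2] and the high 16 bits in a field of param[3].
              * The shift below is written as (x << 31) << 1, presumably as a
              * defensive way of building the 48 bit value. For example, a low
              * word of 0x03020100 and a high field of 0x0504 reassemble to the
              * address 00:01:02:03:04:05 once the byte loop below unpacks it
              * (illustrative values only).
              */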
1314         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315
1316         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317                                              HCLGE_CFG_DEFAULT_SPEED_M,
1318                                              HCLGE_CFG_DEFAULT_SPEED_S);
1319         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320                                                HCLGE_CFG_RSS_SIZE_M,
1321                                                HCLGE_CFG_RSS_SIZE_S);
1322
1323         for (i = 0; i < ETH_ALEN; i++)
1324                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325
1326         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328
1329         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330                                              HCLGE_CFG_SPEED_ABILITY_M,
1331                                              HCLGE_CFG_SPEED_ABILITY_S);
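             /* speed bits beyond the low byte are reported in a separate
              * extended field; shift it up by SPEED_ABILITY_EXT_SHIFT before
              * merging so the combined value can be parsed as one bitmap
              */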
1332         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336
1337         cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338                                                HCLGE_CFG_VLAN_FLTR_CAP_M,
1339                                                HCLGE_CFG_VLAN_FLTR_CAP_S);
1340
1341         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1342                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1343                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1344         if (!cfg->umv_space)
1345                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1346
1347         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1348                                                HCLGE_CFG_PF_RSS_SIZE_M,
1349                                                HCLGE_CFG_PF_RSS_SIZE_S);
1350
1351         /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power
1352          * of 2 exponent instead of the value itself, which is more
1353          * flexible for future changes and expansions.
1354          * When the PF field is 0, there is no separate PF value; in that
1355          * case PF and VF share the same max rss size field:
1356          * HCLGE_CFG_RSS_SIZE_S.
1357          */
1358         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1359                                1U << cfg->pf_rss_size_max :
1360                                cfg->vf_rss_size_max;
1361 }
1362
1363 /* hclge_get_cfg: query the static parameters from flash
1364  * @hdev: pointer to struct hclge_dev
1365  * @hcfg: the config structure to be filled
1366  */
1367 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1368 {
1369         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1370         struct hclge_cfg_param_cmd *req;
1371         unsigned int i;
1372         int ret;
1373
1374         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1375                 u32 offset = 0;
1376
1377                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1378                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1379                                            true);
1380                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1381                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1382                 /* Len is in units of 4 bytes when sent to hardware */
1383                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1384                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1385                 req->offset = cpu_to_le32(offset);
1386         }
1387
1388         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1389         if (ret) {
1390                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1391                 return ret;
1392         }
1393
1394         hclge_parse_cfg(hcfg, desc);
1395
1396         return 0;
1397 }
1398
1399 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1400 {
1401 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1402
1403         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1404
1405         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1407         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1408         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1409         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1410         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1411         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1412 }
1413
1414 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1415                                   struct hclge_desc *desc)
1416 {
1417         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1418         struct hclge_dev_specs_0_cmd *req0;
1419         struct hclge_dev_specs_1_cmd *req1;
1420
1421         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1422         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1423
1424         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1425         ae_dev->dev_specs.rss_ind_tbl_size =
1426                 le16_to_cpu(req0->rss_ind_tbl_size);
1427         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1428         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1429         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1430         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1431         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1432         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1433 }
1434
1435 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1436 {
1437         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1438
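             /* any field the firmware left at zero falls back to the same
              * default used for devices that cannot report their specs
              */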
1439         if (!dev_specs->max_non_tso_bd_num)
1440                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1441         if (!dev_specs->rss_ind_tbl_size)
1442                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1443         if (!dev_specs->rss_key_size)
1444                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1445         if (!dev_specs->max_tm_rate)
1446                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1447         if (!dev_specs->max_qset_num)
1448                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1449         if (!dev_specs->max_int_gl)
1450                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1451         if (!dev_specs->max_frm_size)
1452                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1453 }
1454
1455 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1456 {
1457         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1458         int ret;
1459         int i;
1460
1461         /* set default specifications as devices lower than version V3 do not
1462          * support querying specifications from firmware.
1463          */
1464         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1465                 hclge_set_default_dev_specs(hdev);
1466                 return 0;
1467         }
1468
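             /* every BD except the last one carries the NEXT flag so the
              * descriptors are sent and handled as a single multi-BD query
              */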
1469         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1470                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1471                                            true);
1472                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1473         }
1474         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1475
1476         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1477         if (ret)
1478                 return ret;
1479
1480         hclge_parse_dev_specs(hdev, desc);
1481         hclge_check_dev_specs(hdev);
1482
1483         return 0;
1484 }
1485
1486 static int hclge_get_cap(struct hclge_dev *hdev)
1487 {
1488         int ret;
1489
1490         ret = hclge_query_function_status(hdev);
1491         if (ret) {
1492                 dev_err(&hdev->pdev->dev,
1493                         "query function status error %d.\n", ret);
1494                 return ret;
1495         }
1496
1497         /* get pf resource */
1498         return hclge_query_pf_resource(hdev);
1499 }
1500
1501 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1502 {
1503 #define HCLGE_MIN_TX_DESC       64
1504 #define HCLGE_MIN_RX_DESC       64
1505
1506         if (!is_kdump_kernel())
1507                 return;
1508
1509         dev_info(&hdev->pdev->dev,
1510                  "Running kdump kernel. Using minimal resources\n");
1511
1512         /* the minimal number of queue pairs equals the number of vports */
1513         hdev->num_tqps = hdev->num_req_vfs + 1;
1514         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1515         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1516 }
1517
1518 static int hclge_configure(struct hclge_dev *hdev)
1519 {
1520         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1521         struct hclge_cfg cfg;
1522         unsigned int i;
1523         int ret;
1524
1525         ret = hclge_get_cfg(hdev, &cfg);
1526         if (ret)
1527                 return ret;
1528
1529         hdev->base_tqp_pid = 0;
1530         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1531         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1532         hdev->rx_buf_len = cfg.rx_buf_len;
1533         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1534         hdev->hw.mac.media_type = cfg.media_type;
1535         hdev->hw.mac.phy_addr = cfg.phy_addr;
1536         hdev->num_tx_desc = cfg.tqp_desc_num;
1537         hdev->num_rx_desc = cfg.tqp_desc_num;
1538         hdev->tm_info.num_pg = 1;
1539         hdev->tc_max = cfg.tc_num;
1540         hdev->tm_info.hw_pfc_map = 0;
1541         hdev->wanted_umv_size = cfg.umv_space;
1542         if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1543                 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1544
1545         if (hnae3_dev_fd_supported(hdev)) {
1546                 hdev->fd_en = true;
1547                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1548         }
1549
1550         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1551         if (ret) {
1552                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1553                         cfg.default_speed, ret);
1554                 return ret;
1555         }
1556
1557         hclge_parse_link_mode(hdev, cfg.speed_ability);
1558
1559         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1560
1561         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1562             (hdev->tc_max < 1)) {
1563                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1564                          hdev->tc_max);
1565                 hdev->tc_max = 1;
1566         }
1567
1568         /* Dev does not support DCB */
1569         if (!hnae3_dev_dcb_supported(hdev)) {
1570                 hdev->tc_max = 1;
1571                 hdev->pfc_max = 0;
1572         } else {
1573                 hdev->pfc_max = hdev->tc_max;
1574         }
1575
1576         hdev->tm_info.num_tc = 1;
1577
1578         /* Non-contiguous TCs are currently not supported */
1579         for (i = 0; i < hdev->tm_info.num_tc; i++)
1580                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1581
1582         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1583
1584         hclge_init_kdump_kernel_config(hdev);
1585
1586         /* Set the initial affinity based on the PCI function number */
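             /* for example, with 4 CPUs in the local NUMA node, PCI function 6
              * maps to local CPU index 6 % 4 = 2 (illustrative values only)
              */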
1587         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1588         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1589         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1590                         &hdev->affinity_mask);
1591
1592         return ret;
1593 }
1594
1595 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1596                             u16 tso_mss_max)
1597 {
1598         struct hclge_cfg_tso_status_cmd *req;
1599         struct hclge_desc desc;
1600
1601         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1602
1603         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1604         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1605         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1606
1607         return hclge_cmd_send(&hdev->hw, &desc, 1);
1608 }
1609
1610 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1611 {
1612         struct hclge_cfg_gro_status_cmd *req;
1613         struct hclge_desc desc;
1614         int ret;
1615
1616         if (!hnae3_dev_gro_supported(hdev))
1617                 return 0;
1618
1619         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1620         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1621
1622         req->gro_en = en ? 1 : 0;
1623
1624         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1625         if (ret)
1626                 dev_err(&hdev->pdev->dev,
1627                         "GRO hardware config cmd failed, ret = %d\n", ret);
1628
1629         return ret;
1630 }
1631
1632 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1633 {
1634         struct hclge_tqp *tqp;
1635         int i;
1636
1637         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1638                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1639         if (!hdev->htqp)
1640                 return -ENOMEM;
1641
1642         tqp = hdev->htqp;
1643
1644         for (i = 0; i < hdev->num_tqps; i++) {
1645                 tqp->dev = &hdev->pdev->dev;
1646                 tqp->index = i;
1647
1648                 tqp->q.ae_algo = &ae_algo;
1649                 tqp->q.buf_size = hdev->rx_buf_len;
1650                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1651                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1652
1653                 /* need an extended offset to configure queues >=
1654                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1655                  */
1656                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1657                         tqp->q.io_base = hdev->hw.io_base +
1658                                          HCLGE_TQP_REG_OFFSET +
1659                                          i * HCLGE_TQP_REG_SIZE;
1660                 else
1661                         tqp->q.io_base = hdev->hw.io_base +
1662                                          HCLGE_TQP_REG_OFFSET +
1663                                          HCLGE_TQP_EXT_REG_OFFSET +
1664                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1665                                          HCLGE_TQP_REG_SIZE;
1666
1667                 tqp++;
1668         }
1669
1670         return 0;
1671 }
1672
1673 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1674                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1675 {
1676         struct hclge_tqp_map_cmd *req;
1677         struct hclge_desc desc;
1678         int ret;
1679
1680         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1681
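             /* map physical queue tqp_pid to virtual queue tqp_vid of the
              * given function; the MAP_TYPE bit marks the target as a VF
              * rather than the PF
              */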
1682         req = (struct hclge_tqp_map_cmd *)desc.data;
1683         req->tqp_id = cpu_to_le16(tqp_pid);
1684         req->tqp_vf = func_id;
1685         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1686         if (!is_pf)
1687                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1688         req->tqp_vid = cpu_to_le16(tqp_vid);
1689
1690         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1691         if (ret)
1692                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1693
1694         return ret;
1695 }
1696
1697 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1698 {
1699         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1700         struct hclge_dev *hdev = vport->back;
1701         int i, alloced;
1702
1703         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1704              alloced < num_tqps; i++) {
1705                 if (!hdev->htqp[i].alloced) {
1706                         hdev->htqp[i].q.handle = &vport->nic;
1707                         hdev->htqp[i].q.tqp_index = alloced;
1708                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1709                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1710                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1711                         hdev->htqp[i].alloced = true;
1712                         alloced++;
1713                 }
1714         }
1715         vport->alloc_tqps = alloced;
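             /* rss_size cannot exceed the number of queues this vport owns per
              * TC, nor the PF-wide maximum
              */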
1716         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1717                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1718
1719         /* ensure one-to-one mapping between irq and queue by default */
1720         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1721                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1722
1723         return 0;
1724 }
1725
1726 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1727                             u16 num_tx_desc, u16 num_rx_desc)
1728
1729 {
1730         struct hnae3_handle *nic = &vport->nic;
1731         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1732         struct hclge_dev *hdev = vport->back;
1733         int ret;
1734
1735         kinfo->num_tx_desc = num_tx_desc;
1736         kinfo->num_rx_desc = num_rx_desc;
1737
1738         kinfo->rx_buf_len = hdev->rx_buf_len;
1739
1740         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1741                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1742         if (!kinfo->tqp)
1743                 return -ENOMEM;
1744
1745         ret = hclge_assign_tqp(vport, num_tqps);
1746         if (ret)
1747                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1748
1749         return ret;
1750 }
1751
1752 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1753                                   struct hclge_vport *vport)
1754 {
1755         struct hnae3_handle *nic = &vport->nic;
1756         struct hnae3_knic_private_info *kinfo;
1757         u16 i;
1758
1759         kinfo = &nic->kinfo;
1760         for (i = 0; i < vport->alloc_tqps; i++) {
1761                 struct hclge_tqp *q =
1762                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1763                 bool is_pf;
1764                 int ret;
1765
1766                 is_pf = !(vport->vport_id);
1767                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1768                                              i, is_pf);
1769                 if (ret)
1770                         return ret;
1771         }
1772
1773         return 0;
1774 }
1775
1776 static int hclge_map_tqp(struct hclge_dev *hdev)
1777 {
1778         struct hclge_vport *vport = hdev->vport;
1779         u16 i, num_vport;
1780
1781         num_vport = hdev->num_req_vfs + 1;
1782         for (i = 0; i < num_vport; i++) {
1783                 int ret;
1784
1785                 ret = hclge_map_tqp_to_vport(hdev, vport);
1786                 if (ret)
1787                         return ret;
1788
1789                 vport++;
1790         }
1791
1792         return 0;
1793 }
1794
1795 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1796 {
1797         struct hnae3_handle *nic = &vport->nic;
1798         struct hclge_dev *hdev = vport->back;
1799         int ret;
1800
1801         nic->pdev = hdev->pdev;
1802         nic->ae_algo = &ae_algo;
1803         nic->numa_node_mask = hdev->numa_node_mask;
1804
1805         ret = hclge_knic_setup(vport, num_tqps,
1806                                hdev->num_tx_desc, hdev->num_rx_desc);
1807         if (ret)
1808                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1809
1810         return ret;
1811 }
1812
1813 static int hclge_alloc_vport(struct hclge_dev *hdev)
1814 {
1815         struct pci_dev *pdev = hdev->pdev;
1816         struct hclge_vport *vport;
1817         u32 tqp_main_vport;
1818         u32 tqp_per_vport;
1819         int num_vport, i;
1820         int ret;
1821
1822         /* We need to alloc a vport for the main NIC of the PF */
1823         num_vport = hdev->num_req_vfs + 1;
1824
1825         if (hdev->num_tqps < num_vport) {
1826                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1827                         hdev->num_tqps, num_vport);
1828                 return -EINVAL;
1829         }
1830
1831         /* Alloc the same number of TQPs for every vport */
1832         tqp_per_vport = hdev->num_tqps / num_vport;
1833         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
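             /* for example, 16 TQPs shared by 3 vports: every vport gets
              * 16 / 3 = 5, and the main (PF) vport takes the remainder,
              * 5 + 16 % 3 = 6 (illustrative values only)
              */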
1834
1835         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1836                              GFP_KERNEL);
1837         if (!vport)
1838                 return -ENOMEM;
1839
1840         hdev->vport = vport;
1841         hdev->num_alloc_vport = num_vport;
1842
1843         if (IS_ENABLED(CONFIG_PCI_IOV))
1844                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1845
1846         for (i = 0; i < num_vport; i++) {
1847                 vport->back = hdev;
1848                 vport->vport_id = i;
1849                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1850                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1851                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1852                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1853                 vport->req_vlan_fltr_en = true;
1854                 INIT_LIST_HEAD(&vport->vlan_list);
1855                 INIT_LIST_HEAD(&vport->uc_mac_list);
1856                 INIT_LIST_HEAD(&vport->mc_mac_list);
1857                 spin_lock_init(&vport->mac_list_lock);
1858
1859                 if (i == 0)
1860                         ret = hclge_vport_setup(vport, tqp_main_vport);
1861                 else
1862                         ret = hclge_vport_setup(vport, tqp_per_vport);
1863                 if (ret) {
1864                         dev_err(&pdev->dev,
1865                                 "vport setup failed for vport %d, %d\n",
1866                                 i, ret);
1867                         return ret;
1868                 }
1869
1870                 vport++;
1871         }
1872
1873         return 0;
1874 }
1875
1876 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1877                                     struct hclge_pkt_buf_alloc *buf_alloc)
1878 {
1879 /* TX buffer size is in units of 128 bytes */
1880 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1881 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1882         struct hclge_tx_buff_alloc_cmd *req;
1883         struct hclge_desc desc;
1884         int ret;
1885         u8 i;
1886
1887         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1888
1889         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
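                     /* each TC's tx buffer size is passed in 128 byte units,
                      * e.g. a 32 KiB buffer would be encoded as 0x100; bit 15
                      * (HCLGE_BUF_SIZE_UPDATE_EN_MSK) flags the entry as an
                      * update to be applied
                      */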
1890         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1891                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1892
1893                 req->tx_pkt_buff[i] =
1894                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1895                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1896         }
1897
1898         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1899         if (ret)
1900                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1901                         ret);
1902
1903         return ret;
1904 }
1905
1906 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1907                                  struct hclge_pkt_buf_alloc *buf_alloc)
1908 {
1909         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1910
1911         if (ret)
1912                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1913
1914         return ret;
1915 }
1916
1917 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1918 {
1919         unsigned int i;
1920         u32 cnt = 0;
1921
1922         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1923                 if (hdev->hw_tc_map & BIT(i))
1924                         cnt++;
1925         return cnt;
1926 }
1927
1928 /* Get the number of pfc-enabled TCs that have a private buffer */
1929 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1930                                   struct hclge_pkt_buf_alloc *buf_alloc)
1931 {
1932         struct hclge_priv_buf *priv;
1933         unsigned int i;
1934         int cnt = 0;
1935
1936         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1937                 priv = &buf_alloc->priv_buf[i];
1938                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1939                     priv->enable)
1940                         cnt++;
1941         }
1942
1943         return cnt;
1944 }
1945
1946 /* Get the number of pfc-disabled TCs that have a private buffer */
1947 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1948                                      struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950         struct hclge_priv_buf *priv;
1951         unsigned int i;
1952         int cnt = 0;
1953
1954         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1955                 priv = &buf_alloc->priv_buf[i];
1956                 if (hdev->hw_tc_map & BIT(i) &&
1957                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1958                     priv->enable)
1959                         cnt++;
1960         }
1961
1962         return cnt;
1963 }
1964
1965 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1966 {
1967         struct hclge_priv_buf *priv;
1968         u32 rx_priv = 0;
1969         int i;
1970
1971         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1972                 priv = &buf_alloc->priv_buf[i];
1973                 if (priv->enable)
1974                         rx_priv += priv->buf_size;
1975         }
1976         return rx_priv;
1977 }
1978
1979 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1980 {
1981         u32 i, total_tx_size = 0;
1982
1983         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1984                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1985
1986         return total_tx_size;
1987 }
1988
1989 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1990                                 struct hclge_pkt_buf_alloc *buf_alloc,
1991                                 u32 rx_all)
1992 {
1993         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1994         u32 tc_num = hclge_get_tc_num(hdev);
1995         u32 shared_buf, aligned_mps;
1996         u32 rx_priv;
1997         int i;
1998
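             /* mps is aligned up to the 256 byte buffer unit, e.g. an mps of
              * 1500 bytes becomes 1536 (illustrative value)
              */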
1999         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2000
2001         if (hnae3_dev_dcb_supported(hdev))
2002                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2003                                         hdev->dv_buf_size;
2004         else
2005                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2006                                         + hdev->dv_buf_size;
2007
2008         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2009         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2010                              HCLGE_BUF_SIZE_UNIT);
2011
2012         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2013         if (rx_all < rx_priv + shared_std)
2014                 return false;
2015
2016         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2017         buf_alloc->s_buf.buf_size = shared_buf;
2018         if (hnae3_dev_dcb_supported(hdev)) {
2019                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2020                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2021                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2022                                   HCLGE_BUF_SIZE_UNIT);
2023         } else {
2024                 buf_alloc->s_buf.self.high = aligned_mps +
2025                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
2026                 buf_alloc->s_buf.self.low = aligned_mps;
2027         }
2028
2029         if (hnae3_dev_dcb_supported(hdev)) {
2030                 hi_thrd = shared_buf - hdev->dv_buf_size;
2031
2032                 if (tc_num <= NEED_RESERVE_TC_NUM)
2033                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2034                                         / BUF_MAX_PERCENT;
2035
2036                 if (tc_num)
2037                         hi_thrd = hi_thrd / tc_num;
2038
2039                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2040                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2041                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2042         } else {
2043                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2044                 lo_thrd = aligned_mps;
2045         }
2046
2047         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2048                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2049                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2050         }
2051
2052         return true;
2053 }
2054
2055 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2056                                 struct hclge_pkt_buf_alloc *buf_alloc)
2057 {
2058         u32 i, total_size;
2059
2060         total_size = hdev->pkt_buf_size;
2061
2062         /* alloc tx buffer for all enabled tc */
2063         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2065
2066                 if (hdev->hw_tc_map & BIT(i)) {
2067                         if (total_size < hdev->tx_buf_size)
2068                                 return -ENOMEM;
2069
2070                         priv->tx_buf_size = hdev->tx_buf_size;
2071                 } else {
2072                         priv->tx_buf_size = 0;
2073                 }
2074
2075                 total_size -= priv->tx_buf_size;
2076         }
2077
2078         return 0;
2079 }
2080
2081 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2082                                   struct hclge_pkt_buf_alloc *buf_alloc)
2083 {
2084         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2085         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2086         unsigned int i;
2087
2088         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2089                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2090
2091                 priv->enable = 0;
2092                 priv->wl.low = 0;
2093                 priv->wl.high = 0;
2094                 priv->buf_size = 0;
2095
2096                 if (!(hdev->hw_tc_map & BIT(i)))
2097                         continue;
2098
2099                 priv->enable = 1;
2100
2101                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2102                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2103                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2104                                                 HCLGE_BUF_SIZE_UNIT);
2105                 } else {
2106                         priv->wl.low = 0;
2107                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2108                                         aligned_mps;
2109                 }
2110
2111                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2112         }
2113
2114         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2115 }
2116
2117 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2118                                           struct hclge_pkt_buf_alloc *buf_alloc)
2119 {
2120         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2122         int i;
2123
2124         /* clear private buffers starting from the last TC */
2125         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2126                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2127                 unsigned int mask = BIT((unsigned int)i);
2128
2129                 if (hdev->hw_tc_map & mask &&
2130                     !(hdev->tm_info.hw_pfc_map & mask)) {
2131                         /* Clear the no pfc TC private buffer */
2132                         priv->wl.low = 0;
2133                         priv->wl.high = 0;
2134                         priv->buf_size = 0;
2135                         priv->enable = 0;
2136                         no_pfc_priv_num--;
2137                 }
2138
2139                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2140                     no_pfc_priv_num == 0)
2141                         break;
2142         }
2143
2144         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2145 }
2146
2147 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2148                                         struct hclge_pkt_buf_alloc *buf_alloc)
2149 {
2150         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2151         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2152         int i;
2153
2154         /* clear private buffers starting from the last TC */
2155         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2156                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2157                 unsigned int mask = BIT((unsigned int)i);
2158
2159                 if (hdev->hw_tc_map & mask &&
2160                     hdev->tm_info.hw_pfc_map & mask) {
2161                         /* Reduce the number of pfc TC with private buffer */
2162                         priv->wl.low = 0;
2163                         priv->enable = 0;
2164                         priv->wl.high = 0;
2165                         priv->buf_size = 0;
2166                         pfc_priv_num--;
2167                 }
2168
2169                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2170                     pfc_priv_num == 0)
2171                         break;
2172         }
2173
2174         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2175 }
2176
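     /* Try to give every enabled TC an equal, rounded-down slice of the
      * remaining rx packet buffer as private buffer and leave no shared
      * buffer. This only succeeds when each slice still covers dv_buf_size
      * plus the compensation headroom defined below; otherwise the caller
      * falls back to other strategies.
      */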
2177 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2178                                       struct hclge_pkt_buf_alloc *buf_alloc)
2179 {
2180 #define COMPENSATE_BUFFER       0x3C00
2181 #define COMPENSATE_HALF_MPS_NUM 5
2182 #define PRIV_WL_GAP             0x1800
2183
2184         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2185         u32 tc_num = hclge_get_tc_num(hdev);
2186         u32 half_mps = hdev->mps >> 1;
2187         u32 min_rx_priv;
2188         unsigned int i;
2189
2190         if (tc_num)
2191                 rx_priv = rx_priv / tc_num;
2192
2193         if (tc_num <= NEED_RESERVE_TC_NUM)
2194                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2195
2196         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2197                         COMPENSATE_HALF_MPS_NUM * half_mps;
2198         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2199         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2200         if (rx_priv < min_rx_priv)
2201                 return false;
2202
2203         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2204                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2205
2206                 priv->enable = 0;
2207                 priv->wl.low = 0;
2208                 priv->wl.high = 0;
2209                 priv->buf_size = 0;
2210
2211                 if (!(hdev->hw_tc_map & BIT(i)))
2212                         continue;
2213
2214                 priv->enable = 1;
2215                 priv->buf_size = rx_priv;
2216                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2217                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2218         }
2219
2220         buf_alloc->s_buf.buf_size = 0;
2221
2222         return true;
2223 }
2224
2225 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2226  * @hdev: pointer to struct hclge_dev
2227  * @buf_alloc: pointer to buffer calculation data
2228  * @return: 0 on success, a negative error code on failure
2229  */
2230 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2231                                 struct hclge_pkt_buf_alloc *buf_alloc)
2232 {
2233         /* When DCB is not supported, rx private buffer is not allocated. */
2234         if (!hnae3_dev_dcb_supported(hdev)) {
2235                 u32 rx_all = hdev->pkt_buf_size;
2236
2237                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2238                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2239                         return -ENOMEM;
2240
2241                 return 0;
2242         }
2243
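             /* try each allocation strategy in turn until one fits in the
              * rx buffer that is left after the tx buffers were carved out
              */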
2244         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2245                 return 0;
2246
2247         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2248                 return 0;
2249
2250         /* try to decrease the buffer size */
2251         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2252                 return 0;
2253
2254         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2255                 return 0;
2256
2257         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2258                 return 0;
2259
2260         return -ENOMEM;
2261 }
2262
2263 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2264                                    struct hclge_pkt_buf_alloc *buf_alloc)
2265 {
2266         struct hclge_rx_priv_buff_cmd *req;
2267         struct hclge_desc desc;
2268         int ret;
2269         int i;
2270
2271         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2272         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2273
2274         /* Alloc private buffer TCs */
2275         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2276                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2277
2278                 req->buf_num[i] =
2279                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2280                 req->buf_num[i] |=
2281                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2282         }
2283
2284         req->shared_buf =
2285                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2286                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2287
2288         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2289         if (ret)
2290                 dev_err(&hdev->pdev->dev,
2291                         "rx private buffer alloc cmd failed %d\n", ret);
2292
2293         return ret;
2294 }
2295
2296 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2297                                    struct hclge_pkt_buf_alloc *buf_alloc)
2298 {
2299         struct hclge_rx_priv_wl_buf *req;
2300         struct hclge_priv_buf *priv;
2301         struct hclge_desc desc[2];
2302         int i, j;
2303         int ret;
2304
2305         for (i = 0; i < 2; i++) {
2306                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2307                                            false);
2308                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2309
2310                 /* The first descriptor sets the NEXT bit to 1 */
2311                 if (i == 0)
2312                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2313                 else
2314                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2315
2316                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2317                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2318
2319                         priv = &buf_alloc->priv_buf[idx];
2320                         req->tc_wl[j].high =
2321                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2322                         req->tc_wl[j].high |=
2323                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2324                         req->tc_wl[j].low =
2325                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2326                         req->tc_wl[j].low |=
2327                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2328                 }
2329         }
2330
2331         /* Send 2 descriptors at one time */
2332         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2333         if (ret)
2334                 dev_err(&hdev->pdev->dev,
2335                         "rx private waterline config cmd failed %d\n",
2336                         ret);
2337         return ret;
2338 }
2339
2340 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2341                                     struct hclge_pkt_buf_alloc *buf_alloc)
2342 {
2343         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2344         struct hclge_rx_com_thrd *req;
2345         struct hclge_desc desc[2];
2346         struct hclge_tc_thrd *tc;
2347         int i, j;
2348         int ret;
2349
2350         for (i = 0; i < 2; i++) {
2351                 hclge_cmd_setup_basic_desc(&desc[i],
2352                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2353                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2354
2355                 /* The first descriptor sets the NEXT bit to 1 */
2356                 if (i == 0)
2357                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2358                 else
2359                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2360
2361                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2362                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2363
2364                         req->com_thrd[j].high =
2365                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2366                         req->com_thrd[j].high |=
2367                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368                         req->com_thrd[j].low =
2369                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2370                         req->com_thrd[j].low |=
2371                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2372                 }
2373         }
2374
2375         /* Send 2 descriptors at one time */
2376         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2377         if (ret)
2378                 dev_err(&hdev->pdev->dev,
2379                         "common threshold config cmd failed %d\n", ret);
2380         return ret;
2381 }
2382
2383 static int hclge_common_wl_config(struct hclge_dev *hdev,
2384                                   struct hclge_pkt_buf_alloc *buf_alloc)
2385 {
2386         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2387         struct hclge_rx_com_wl *req;
2388         struct hclge_desc desc;
2389         int ret;
2390
2391         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2392
2393         req = (struct hclge_rx_com_wl *)desc.data;
2394         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2395         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2396
2397         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2398         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2399
2400         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2401         if (ret)
2402                 dev_err(&hdev->pdev->dev,
2403                         "common waterline config cmd failed %d\n", ret);
2404
2405         return ret;
2406 }
2407
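     /* hclge_buffer_alloc: calculate and program the whole packet buffer
      * layout: tx buffers first, then rx private buffers, then (on DCB
      * capable devices) the per-TC waterlines and common thresholds, and
      * finally the common (shared) waterline.
      */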
2408 int hclge_buffer_alloc(struct hclge_dev *hdev)
2409 {
2410         struct hclge_pkt_buf_alloc *pkt_buf;
2411         int ret;
2412
2413         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2414         if (!pkt_buf)
2415                 return -ENOMEM;
2416
2417         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2418         if (ret) {
2419                 dev_err(&hdev->pdev->dev,
2420                         "could not calc tx buffer size for all TCs %d\n", ret);
2421                 goto out;
2422         }
2423
2424         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2425         if (ret) {
2426                 dev_err(&hdev->pdev->dev,
2427                         "could not alloc tx buffers %d\n", ret);
2428                 goto out;
2429         }
2430
2431         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2432         if (ret) {
2433                 dev_err(&hdev->pdev->dev,
2434                         "could not calc rx priv buffer size for all TCs %d\n",
2435                         ret);
2436                 goto out;
2437         }
2438
2439         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2440         if (ret) {
2441                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2442                         ret);
2443                 goto out;
2444         }
2445
2446         if (hnae3_dev_dcb_supported(hdev)) {
2447                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2448                 if (ret) {
2449                         dev_err(&hdev->pdev->dev,
2450                                 "could not configure rx private waterline %d\n",
2451                                 ret);
2452                         goto out;
2453                 }
2454
2455                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2456                 if (ret) {
2457                         dev_err(&hdev->pdev->dev,
2458                                 "could not configure common threshold %d\n",
2459                                 ret);
2460                         goto out;
2461                 }
2462         }
2463
2464         ret = hclge_common_wl_config(hdev, pkt_buf);
2465         if (ret)
2466                 dev_err(&hdev->pdev->dev,
2467                         "could not configure common waterline %d\n", ret);
2468
2469 out:
2470         kfree(pkt_buf);
2471         return ret;
2472 }
2473
2474 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2475 {
2476         struct hnae3_handle *roce = &vport->roce;
2477         struct hnae3_handle *nic = &vport->nic;
2478         struct hclge_dev *hdev = vport->back;
2479
2480         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2481
2482         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2483                 return -EINVAL;
2484
2485         roce->rinfo.base_vector = hdev->roce_base_vector;
2486
2487         roce->rinfo.netdev = nic->kinfo.netdev;
2488         roce->rinfo.roce_io_base = hdev->hw.io_base;
2489         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2490
2491         roce->pdev = nic->pdev;
2492         roce->ae_algo = nic->ae_algo;
2493         roce->numa_node_mask = nic->numa_node_mask;
2494
2495         return 0;
2496 }
2497
2498 static int hclge_init_msi(struct hclge_dev *hdev)
2499 {
2500         struct pci_dev *pdev = hdev->pdev;
2501         int vectors;
2502         int i;
2503
2504         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2505                                         hdev->num_msi,
2506                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2507         if (vectors < 0) {
2508                 dev_err(&pdev->dev,
2509                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2510                         vectors);
2511                 return vectors;
2512         }
2513         if (vectors < hdev->num_msi)
2514                 dev_warn(&hdev->pdev->dev,
2515                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2516                          hdev->num_msi, vectors);
2517
2518         hdev->num_msi = vectors;
2519         hdev->num_msi_left = vectors;
2520
2521         hdev->base_msi_vector = pdev->irq;
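             /* the RoCE vectors are laid out directly after the NIC vectors;
              * hclge_init_roce_base_info() checks that num_msi covers both
              */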
2522         hdev->roce_base_vector = hdev->base_msi_vector +
2523                                 hdev->num_nic_msi;
2524
2525         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2526                                            sizeof(u16), GFP_KERNEL);
2527         if (!hdev->vector_status) {
2528                 pci_free_irq_vectors(pdev);
2529                 return -ENOMEM;
2530         }
2531
2532         for (i = 0; i < hdev->num_msi; i++)
2533                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2534
2535         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2536                                         sizeof(int), GFP_KERNEL);
2537         if (!hdev->vector_irq) {
2538                 pci_free_irq_vectors(pdev);
2539                 return -ENOMEM;
2540         }
2541
2542         return 0;
2543 }
2544
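     /* half duplex is only meaningful at 10M and 100M; force full duplex for
      * all other speeds
      */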
2545 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2546 {
2547         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2548                 duplex = HCLGE_MAC_FULL;
2549
2550         return duplex;
2551 }
2552
2553 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2554                                       u8 duplex)
2555 {
2556         struct hclge_config_mac_speed_dup_cmd *req;
2557         struct hclge_desc desc;
2558         int ret;
2559
2560         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2561
2562         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2563
2564         if (duplex)
2565                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2566
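             /* the firmware speed encoding, taken from the cases below:
              * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G,
              * 6 = 10M, 7 = 100M, 8 = 200G
              */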
2567         switch (speed) {
2568         case HCLGE_MAC_SPEED_10M:
2569                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570                                 HCLGE_CFG_SPEED_S, 6);
2571                 break;
2572         case HCLGE_MAC_SPEED_100M:
2573                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574                                 HCLGE_CFG_SPEED_S, 7);
2575                 break;
2576         case HCLGE_MAC_SPEED_1G:
2577                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578                                 HCLGE_CFG_SPEED_S, 0);
2579                 break;
2580         case HCLGE_MAC_SPEED_10G:
2581                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582                                 HCLGE_CFG_SPEED_S, 1);
2583                 break;
2584         case HCLGE_MAC_SPEED_25G:
2585                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586                                 HCLGE_CFG_SPEED_S, 2);
2587                 break;
2588         case HCLGE_MAC_SPEED_40G:
2589                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590                                 HCLGE_CFG_SPEED_S, 3);
2591                 break;
2592         case HCLGE_MAC_SPEED_50G:
2593                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594                                 HCLGE_CFG_SPEED_S, 4);
2595                 break;
2596         case HCLGE_MAC_SPEED_100G:
2597                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598                                 HCLGE_CFG_SPEED_S, 5);
2599                 break;
2600         case HCLGE_MAC_SPEED_200G:
2601                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602                                 HCLGE_CFG_SPEED_S, 8);
2603                 break;
2604         default:
2605                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2606                 return -EINVAL;
2607         }
2608
2609         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2610                       1);
2611
2612         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2613         if (ret) {
2614                 dev_err(&hdev->pdev->dev,
2615                         "mac speed/duplex config cmd failed %d.\n", ret);
2616                 return ret;
2617         }
2618
2619         return 0;
2620 }
2621
2622 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2623 {
2624         struct hclge_mac *mac = &hdev->hw.mac;
2625         int ret;
2626
2627         duplex = hclge_check_speed_dup(duplex, speed);
2628         if (!mac->support_autoneg && mac->speed == speed &&
2629             mac->duplex == duplex)
2630                 return 0;
2631
2632         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2633         if (ret)
2634                 return ret;
2635
2636         hdev->hw.mac.speed = speed;
2637         hdev->hw.mac.duplex = duplex;
2638
2639         return 0;
2640 }
2641
2642 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2643                                      u8 duplex)
2644 {
2645         struct hclge_vport *vport = hclge_get_vport(handle);
2646         struct hclge_dev *hdev = vport->back;
2647
2648         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2649 }
2650
2651 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2652 {
2653         struct hclge_config_auto_neg_cmd *req;
2654         struct hclge_desc desc;
2655         u32 flag = 0;
2656         int ret;
2657
2658         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2659
2660         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2661         if (enable)
2662                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2663         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2664
2665         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2666         if (ret)
2667                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2668                         ret);
2669
2670         return ret;
2671 }
2672
2673 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2674 {
2675         struct hclge_vport *vport = hclge_get_vport(handle);
2676         struct hclge_dev *hdev = vport->back;
2677
2678         if (!hdev->hw.mac.support_autoneg) {
2679                 if (enable) {
2680                         dev_err(&hdev->pdev->dev,
2681                                 "autoneg is not supported by current port\n");
2682                         return -EOPNOTSUPP;
2683                 } else {
2684                         return 0;
2685                 }
2686         }
2687
2688         return hclge_set_autoneg_en(hdev, enable);
2689 }
2690
2691 static int hclge_get_autoneg(struct hnae3_handle *handle)
2692 {
2693         struct hclge_vport *vport = hclge_get_vport(handle);
2694         struct hclge_dev *hdev = vport->back;
2695         struct phy_device *phydev = hdev->hw.mac.phydev;
2696
2697         if (phydev)
2698                 return phydev->autoneg;
2699
2700         return hdev->hw.mac.autoneg;
2701 }
2702
2703 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2704 {
2705         struct hclge_vport *vport = hclge_get_vport(handle);
2706         struct hclge_dev *hdev = vport->back;
2707         int ret;
2708
2709         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2710
2711         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2712         if (ret)
2713                 return ret;
2714         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2715 }
2716
2717 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2718 {
2719         struct hclge_vport *vport = hclge_get_vport(handle);
2720         struct hclge_dev *hdev = vport->back;
2721
2722         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2723                 return hclge_set_autoneg_en(hdev, !halt);
2724
2725         return 0;
2726 }
2727
2728 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2729 {
2730         struct hclge_config_fec_cmd *req;
2731         struct hclge_desc desc;
2732         int ret;
2733
2734         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2735
2736         req = (struct hclge_config_fec_cmd *)desc.data;
2737         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2738                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2739         if (fec_mode & BIT(HNAE3_FEC_RS))
2740                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2741                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2742         if (fec_mode & BIT(HNAE3_FEC_BASER))
2743                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2744                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2745
2746         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2747         if (ret)
2748                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2749
2750         return ret;
2751 }
2752
2753 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2754 {
2755         struct hclge_vport *vport = hclge_get_vport(handle);
2756         struct hclge_dev *hdev = vport->back;
2757         struct hclge_mac *mac = &hdev->hw.mac;
2758         int ret;
2759
2760         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2761                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2762                 return -EINVAL;
2763         }
2764
2765         ret = hclge_set_fec_hw(hdev, fec_mode);
2766         if (ret)
2767                 return ret;
2768
2769         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2770         return 0;
2771 }
2772
2773 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2774                           u8 *fec_mode)
2775 {
2776         struct hclge_vport *vport = hclge_get_vport(handle);
2777         struct hclge_dev *hdev = vport->back;
2778         struct hclge_mac *mac = &hdev->hw.mac;
2779
2780         if (fec_ability)
2781                 *fec_ability = mac->fec_ability;
2782         if (fec_mode)
2783                 *fec_mode = mac->fec_mode;
2784 }
2785
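/* Bring the MAC to a known state: default to full duplex, program the
 * speed/duplex, autoneg and any user-requested FEC mode when supported, then
 * set the MTU, the default loopback mode and reallocate packet buffers.
 */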
2786 static int hclge_mac_init(struct hclge_dev *hdev)
2787 {
2788         struct hclge_mac *mac = &hdev->hw.mac;
2789         int ret;
2790
2791         hdev->support_sfp_query = true;
2792         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2793         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2794                                          hdev->hw.mac.duplex);
2795         if (ret)
2796                 return ret;
2797
2798         if (hdev->hw.mac.support_autoneg) {
2799                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2800                 if (ret)
2801                         return ret;
2802         }
2803
2804         mac->link = 0;
2805
2806         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2807                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2808                 if (ret)
2809                         return ret;
2810         }
2811
2812         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2813         if (ret) {
2814                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2815                 return ret;
2816         }
2817
2818         ret = hclge_set_default_loopback(hdev);
2819         if (ret)
2820                 return ret;
2821
2822         ret = hclge_buffer_alloc(hdev);
2823         if (ret)
2824                 dev_err(&hdev->pdev->dev,
2825                         "allocate buffer fail, ret=%d\n", ret);
2826
2827         return ret;
2828 }
2829
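/* The mailbox, reset and periodic service work share one delayed work item;
 * the helpers below queue it on the first CPU of the misc vector's affinity
 * mask unless the device is being removed (or a previous reset has failed,
 * for the periodic task).
 */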
2830 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2831 {
2832         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2834                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835                                     hclge_wq, &hdev->service_task, 0);
2836 }
2837
2838 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2839 {
2840         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2842                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843                                     hclge_wq, &hdev->service_task, 0);
2844 }
2845
2846 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2847 {
2848         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2850                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851                                     hclge_wq, &hdev->service_task,
2852                                     delay_time);
2853 }
2854
2855 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2856 {
2857         struct hclge_link_status_cmd *req;
2858         struct hclge_desc desc;
2859         int ret;
2860
2861         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2862         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2863         if (ret) {
2864                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2865                         ret);
2866                 return ret;
2867         }
2868
2869         req = (struct hclge_link_status_cmd *)desc.data;
2870         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2871                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2872
2873         return 0;
2874 }
2875
2876 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2877 {
2878         struct phy_device *phydev = hdev->hw.mac.phydev;
2879
2880         *link_status = HCLGE_LINK_STATUS_DOWN;
2881
2882         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2883                 return 0;
2884
2885         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2886                 return 0;
2887
2888         return hclge_get_mac_link_status(hdev, link_status);
2889 }
2890
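/* Push the current link status to every alive VF whose link state is
 * configured as IFLA_VF_LINK_STATE_AUTO.
 */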
2891 static void hclge_push_link_status(struct hclge_dev *hdev)
2892 {
2893         struct hclge_vport *vport;
2894         int ret;
2895         u16 i;
2896
2897         for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2898                 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2899
2900                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2901                     vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2902                         continue;
2903
2904                 ret = hclge_push_vf_link_status(vport);
2905                 if (ret) {
2906                         dev_err(&hdev->pdev->dev,
2907                                 "failed to push link status to vf%u, ret = %d\n",
2908                                 i, ret);
2909                 }
2910         }
2911 }
2912
2913 static void hclge_update_link_status(struct hclge_dev *hdev)
2914 {
2915         struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2916         struct hnae3_handle *handle = &hdev->vport[0].nic;
2917         struct hnae3_client *rclient = hdev->roce_client;
2918         struct hnae3_client *client = hdev->nic_client;
2919         int state;
2920         int ret;
2921
2922         if (!client)
2923                 return;
2924
2925         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2926                 return;
2927
2928         ret = hclge_get_mac_phy_link(hdev, &state);
2929         if (ret) {
2930                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2931                 return;
2932         }
2933
2934         if (state != hdev->hw.mac.link) {
2935                 client->ops->link_status_change(handle, state);
2936                 hclge_config_mac_tnl_int(hdev, state);
2937                 if (rclient && rclient->ops->link_status_change)
2938                         rclient->ops->link_status_change(rhandle, state);
2939
2940                 hdev->hw.mac.link = state;
2941                 hclge_push_link_status(hdev);
2942         }
2943
2944         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2945 }
2946
2947 static void hclge_update_port_capability(struct hclge_dev *hdev,
2948                                          struct hclge_mac *mac)
2949 {
2950         if (hnae3_dev_fec_supported(hdev))
2951                 /* update fec ability by speed */
2952                 hclge_convert_setting_fec(mac);
2953
2954         /* firmware cannot identify the backplane type; the media type
2955          * read from the configuration helps to deal with it
2956          */
2957         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2958             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2959                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2960         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2961                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2962
2963         if (mac->support_autoneg) {
2964                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2965                 linkmode_copy(mac->advertising, mac->supported);
2966         } else {
2967                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2968                                    mac->supported);
2969                 linkmode_zero(mac->advertising);
2970         }
2971 }
2972
2973 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2974 {
2975         struct hclge_sfp_info_cmd *resp;
2976         struct hclge_desc desc;
2977         int ret;
2978
2979         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2980         resp = (struct hclge_sfp_info_cmd *)desc.data;
2981         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2982         if (ret == -EOPNOTSUPP) {
2983                 dev_warn(&hdev->pdev->dev,
2984                          "IMP does not support getting SFP speed %d\n", ret);
2985                 return ret;
2986         } else if (ret) {
2987                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2988                 return ret;
2989         }
2990
2991         *speed = le32_to_cpu(resp->speed);
2992
2993         return 0;
2994 }
2995
2996 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2997 {
2998         struct hclge_sfp_info_cmd *resp;
2999         struct hclge_desc desc;
3000         int ret;
3001
3002         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3003         resp = (struct hclge_sfp_info_cmd *)desc.data;
3004
3005         resp->query_type = QUERY_ACTIVE_SPEED;
3006
3007         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3008         if (ret == -EOPNOTSUPP) {
3009                 dev_warn(&hdev->pdev->dev,
3010                          "IMP does not support getting SFP info %d\n", ret);
3011                 return ret;
3012         } else if (ret) {
3013                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3014                 return ret;
3015         }
3016
3017         /* In some cases, the MAC speed obtained from IMP may be 0; it should
3018          * not be written to mac->speed.
3019          */
3020         if (!le32_to_cpu(resp->speed))
3021                 return 0;
3022
3023         mac->speed = le32_to_cpu(resp->speed);
3024         /* if resp->speed_ability is 0, the firmware is an old version,
3025          * so do not update these params
3026          */
3027         if (resp->speed_ability) {
3028                 mac->module_type = le32_to_cpu(resp->module_type);
3029                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3030                 mac->autoneg = resp->autoneg;
3031                 mac->support_autoneg = resp->autoneg_ability;
3032                 mac->speed_type = QUERY_ACTIVE_SPEED;
3033                 if (!resp->active_fec)
3034                         mac->fec_mode = 0;
3035                 else
3036                         mac->fec_mode = BIT(resp->active_fec);
3037         } else {
3038                 mac->speed_type = QUERY_SFP_SPEED;
3039         }
3040
3041         return 0;
3042 }
3043
3044 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3045                                         struct ethtool_link_ksettings *cmd)
3046 {
3047         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3048         struct hclge_vport *vport = hclge_get_vport(handle);
3049         struct hclge_phy_link_ksetting_0_cmd *req0;
3050         struct hclge_phy_link_ksetting_1_cmd *req1;
3051         u32 supported, advertising, lp_advertising;
3052         struct hclge_dev *hdev = vport->back;
3053         int ret;
3054
3055         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3056                                    true);
3057         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3058         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3059                                    true);
3060
3061         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3062         if (ret) {
3063                 dev_err(&hdev->pdev->dev,
3064                         "failed to get phy link ksetting, ret = %d.\n", ret);
3065                 return ret;
3066         }
3067
3068         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3069         cmd->base.autoneg = req0->autoneg;
3070         cmd->base.speed = le32_to_cpu(req0->speed);
3071         cmd->base.duplex = req0->duplex;
3072         cmd->base.port = req0->port;
3073         cmd->base.transceiver = req0->transceiver;
3074         cmd->base.phy_address = req0->phy_address;
3075         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3076         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3077         supported = le32_to_cpu(req0->supported);
3078         advertising = le32_to_cpu(req0->advertising);
3079         lp_advertising = le32_to_cpu(req0->lp_advertising);
3080         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3081                                                 supported);
3082         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3083                                                 advertising);
3084         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3085                                                 lp_advertising);
3086
3087         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3088         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3089         cmd->base.master_slave_state = req1->master_slave_state;
3090
3091         return 0;
3092 }
3093
3094 static int
3095 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3096                              const struct ethtool_link_ksettings *cmd)
3097 {
3098         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3099         struct hclge_vport *vport = hclge_get_vport(handle);
3100         struct hclge_phy_link_ksetting_0_cmd *req0;
3101         struct hclge_phy_link_ksetting_1_cmd *req1;
3102         struct hclge_dev *hdev = vport->back;
3103         u32 advertising;
3104         int ret;
3105
3106         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3107             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3108              (cmd->base.duplex != DUPLEX_HALF &&
3109               cmd->base.duplex != DUPLEX_FULL)))
3110                 return -EINVAL;
3111
3112         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3113                                    false);
3114         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3115         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3116                                    false);
3117
3118         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3119         req0->autoneg = cmd->base.autoneg;
3120         req0->speed = cpu_to_le32(cmd->base.speed);
3121         req0->duplex = cmd->base.duplex;
3122         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3123                                                 cmd->link_modes.advertising);
3124         req0->advertising = cpu_to_le32(advertising);
3125         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3126
3127         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3128         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3129
3130         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3131         if (ret) {
3132                 dev_err(&hdev->pdev->dev,
3133                         "failed to set phy link ksettings, ret = %d.\n", ret);
3134                 return ret;
3135         }
3136
3137         hdev->hw.mac.autoneg = cmd->base.autoneg;
3138         hdev->hw.mac.speed = cmd->base.speed;
3139         hdev->hw.mac.duplex = cmd->base.duplex;
3140         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3141
3142         return 0;
3143 }
3144
3145 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3146 {
3147         struct ethtool_link_ksettings cmd;
3148         int ret;
3149
3150         if (!hnae3_dev_phy_imp_supported(hdev))
3151                 return 0;
3152
3153         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3154         if (ret)
3155                 return ret;
3156
3157         hdev->hw.mac.autoneg = cmd.base.autoneg;
3158         hdev->hw.mac.speed = cmd.base.speed;
3159         hdev->hw.mac.duplex = cmd.base.duplex;
3160
3161         return 0;
3162 }
3163
3164 static int hclge_tp_port_init(struct hclge_dev *hdev)
3165 {
3166         struct ethtool_link_ksettings cmd;
3167
3168         if (!hnae3_dev_phy_imp_supported(hdev))
3169                 return 0;
3170
3171         cmd.base.autoneg = hdev->hw.mac.autoneg;
3172         cmd.base.speed = hdev->hw.mac.speed;
3173         cmd.base.duplex = hdev->hw.mac.duplex;
3174         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3175
3176         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3177 }
3178
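/* Refresh the port information: copper ports read it from the PHY through
 * IMP, other ports query the SFP; if the firmware does not support the
 * query, further attempts are disabled.
 */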
3179 static int hclge_update_port_info(struct hclge_dev *hdev)
3180 {
3181         struct hclge_mac *mac = &hdev->hw.mac;
3182         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3183         int ret;
3184
3185         /* get the port info from SFP cmd if not copper port */
3186         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3187                 return hclge_update_tp_port_info(hdev);
3188
3189         /* if IMP does not support getting SFP/qSFP info, return directly */
3190         if (!hdev->support_sfp_query)
3191                 return 0;
3192
3193         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3194                 ret = hclge_get_sfp_info(hdev, mac);
3195         else
3196                 ret = hclge_get_sfp_speed(hdev, &speed);
3197
3198         if (ret == -EOPNOTSUPP) {
3199                 hdev->support_sfp_query = false;
3200                 return ret;
3201         } else if (ret) {
3202                 return ret;
3203         }
3204
3205         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3206                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3207                         hclge_update_port_capability(hdev, mac);
3208                         return 0;
3209                 }
3210                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3211                                                HCLGE_MAC_FULL);
3212         } else {
3213                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3214                         return 0; /* do nothing if no SFP */
3215
3216                 /* must config full duplex for SFP */
3217                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3218         }
3219 }
3220
3221 static int hclge_get_status(struct hnae3_handle *handle)
3222 {
3223         struct hclge_vport *vport = hclge_get_vport(handle);
3224         struct hclge_dev *hdev = vport->back;
3225
3226         hclge_update_link_status(hdev);
3227
3228         return hdev->hw.mac.link;
3229 }
3230
3231 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3232 {
3233         if (!pci_num_vf(hdev->pdev)) {
3234                 dev_err(&hdev->pdev->dev,
3235                         "SRIOV is disabled, cannot get vport(%d) info.\n", vf);
3236                 return NULL;
3237         }
3238
3239         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3240                 dev_err(&hdev->pdev->dev,
3241                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3242                         vf, pci_num_vf(hdev->pdev));
3243                 return NULL;
3244         }
3245
3246         /* VFs start from 1 in vport */
3247         vf += HCLGE_VF_VPORT_START_NUM;
3248         return &hdev->vport[vf];
3249 }
3250
3251 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3252                                struct ifla_vf_info *ivf)
3253 {
3254         struct hclge_vport *vport = hclge_get_vport(handle);
3255         struct hclge_dev *hdev = vport->back;
3256
3257         vport = hclge_get_vf_vport(hdev, vf);
3258         if (!vport)
3259                 return -EINVAL;
3260
3261         ivf->vf = vf;
3262         ivf->linkstate = vport->vf_info.link_state;
3263         ivf->spoofchk = vport->vf_info.spoofchk;
3264         ivf->trusted = vport->vf_info.trusted;
3265         ivf->min_tx_rate = 0;
3266         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3267         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3268         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3269         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3270         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3271
3272         return 0;
3273 }
3274
3275 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3276                                    int link_state)
3277 {
3278         struct hclge_vport *vport = hclge_get_vport(handle);
3279         struct hclge_dev *hdev = vport->back;
3280         int link_state_old;
3281         int ret;
3282
3283         vport = hclge_get_vf_vport(hdev, vf);
3284         if (!vport)
3285                 return -EINVAL;
3286
3287         link_state_old = vport->vf_info.link_state;
3288         vport->vf_info.link_state = link_state;
3289
3290         ret = hclge_push_vf_link_status(vport);
3291         if (ret) {
3292                 vport->vf_info.link_state = link_state_old;
3293                 dev_err(&hdev->pdev->dev,
3294                         "failed to push vf%d link status, ret = %d\n", vf, ret);
3295         }
3296
3297         return ret;
3298 }
3299
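/* Decode the vector0 interrupt source in priority order: IMP reset, global
 * reset, MSI-X error, mailbox (CMDQ RX), then anything else.
 */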
3300 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3301 {
3302         u32 cmdq_src_reg, msix_src_reg;
3303
3304         /* fetch the events from their corresponding regs */
3305         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3306         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3307
3308         /* Assumption: if reset and mailbox events are reported together,
3309          * only the reset event is processed in this pass and the mailbox
3310          * events are deferred. Since the RX CMDQ event is not cleared this
3311          * time, the hardware will raise another interrupt just for the
3312          * mailbox.
3313          *
3314          * check for vector0 reset event sources
3315          */
3316         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3317                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3318                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3319                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3320                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3321                 hdev->rst_stats.imp_rst_cnt++;
3322                 return HCLGE_VECTOR0_EVENT_RST;
3323         }
3324
3325         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3326                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3327                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3328                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3329                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3330                 hdev->rst_stats.global_rst_cnt++;
3331                 return HCLGE_VECTOR0_EVENT_RST;
3332         }
3333
3334         /* check for vector0 msix event source */
3335         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3336                 *clearval = msix_src_reg;
3337                 return HCLGE_VECTOR0_EVENT_ERR;
3338         }
3339
3340         /* check for vector0 mailbox(=CMDQ RX) event source */
3341         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3342                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3343                 *clearval = cmdq_src_reg;
3344                 return HCLGE_VECTOR0_EVENT_MBX;
3345         }
3346
3347         /* print other vector0 event source */
3348         dev_info(&hdev->pdev->dev,
3349                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3350                  cmdq_src_reg, msix_src_reg);
3351         *clearval = msix_src_reg;
3352
3353         return HCLGE_VECTOR0_EVENT_OTHER;
3354 }
3355
3356 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3357                                     u32 regclr)
3358 {
3359         switch (event_type) {
3360         case HCLGE_VECTOR0_EVENT_RST:
3361                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3362                 break;
3363         case HCLGE_VECTOR0_EVENT_MBX:
3364                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3365                 break;
3366         default:
3367                 break;
3368         }
3369 }
3370
3371 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3372 {
3373         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3374                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3375                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3376                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3377         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3378 }
3379
3380 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3381 {
3382         writel(enable ? 1 : 0, vector->addr);
3383 }
3384
3385 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3386 {
3387         struct hclge_dev *hdev = data;
3388         u32 clearval = 0;
3389         u32 event_cause;
3390
3391         hclge_enable_vector(&hdev->misc_vector, false);
3392         event_cause = hclge_check_event_cause(hdev, &clearval);
3393
3394         /* vector 0 interrupt is shared with reset and mailbox source events. */
3395         switch (event_cause) {
3396         case HCLGE_VECTOR0_EVENT_ERR:
3397                 /* we do not know what type of reset is required now. This can
3398                  * only be decided after we fetch the type of errors which
3399                  * caused this event. Therefore, do the following for now:
3400                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
3401                  *    actual reset type to be used is deferred.
3402                  * 2. Schedule the reset service task.
3403                  * 3. When the service task sees HNAE3_UNKNOWN_RESET, it will
3404                  *    fetch the correct type of reset by first decoding the
3405                  *    types of errors.
3406                  */
3407                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3408                 fallthrough;
3409         case HCLGE_VECTOR0_EVENT_RST:
3410                 hclge_reset_task_schedule(hdev);
3411                 break;
3412         case HCLGE_VECTOR0_EVENT_MBX:
3413                 /* If we are here then either:
3414                  * 1. we are not handling any mbx task and none is
3415                  *    scheduled,
3416                  *                        OR
3417                  * 2. we could be handling an mbx task but nothing more is
3418                  *    scheduled.
3419                  * In both cases, schedule the mbx task as there are more
3420                  * mbx messages reported by this interrupt.
3421                  */
3422                 hclge_mbx_task_schedule(hdev);
3423                 break;
3424         default:
3425                 dev_warn(&hdev->pdev->dev,
3426                          "received unknown or unhandled event of vector0\n");
3427                 break;
3428         }
3429
3430         hclge_clear_event_cause(hdev, event_cause, clearval);
3431
3432         /* Enable the interrupt if it is not caused by reset. When
3433          * clearval equals 0, the interrupt status may have been
3434          * cleared by hardware before the driver read the status register.
3435          * In this case, the vector0 interrupt should also be enabled.
3436          */
3437         if (!clearval ||
3438             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3439                 hclge_enable_vector(&hdev->misc_vector, true);
3440         }
3441
3442         return IRQ_HANDLED;
3443 }
3444
3445 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3446 {
3447         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3448                 dev_warn(&hdev->pdev->dev,
3449                          "vector(vector_id %d) has been freed.\n", vector_id);
3450                 return;
3451         }
3452
3453         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3454         hdev->num_msi_left += 1;
3455         hdev->num_msi_used -= 1;
3456 }
3457
3458 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3459 {
3460         struct hclge_misc_vector *vector = &hdev->misc_vector;
3461
3462         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3463
3464         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3465         hdev->vector_status[0] = 0;
3466
3467         hdev->num_msi_left -= 1;
3468         hdev->num_msi_used += 1;
3469 }
3470
3471 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3472                                       const cpumask_t *mask)
3473 {
3474         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3475                                               affinity_notify);
3476
3477         cpumask_copy(&hdev->affinity_mask, mask);
3478 }
3479
3480 static void hclge_irq_affinity_release(struct kref *ref)
3481 {
3482 }
3483
3484 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3485 {
3486         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3487                               &hdev->affinity_mask);
3488
3489         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3490         hdev->affinity_notify.release = hclge_irq_affinity_release;
3491         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3492                                   &hdev->affinity_notify);
3493 }
3494
3495 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3496 {
3497         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3498         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3499 }
3500
3501 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3502 {
3503         int ret;
3504
3505         hclge_get_misc_vector(hdev);
3506
3507         /* this will be explicitly freed at the end */
3508         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3509                  HCLGE_NAME, pci_name(hdev->pdev));
3510         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3511                           0, hdev->misc_vector.name, hdev);
3512         if (ret) {
3513                 hclge_free_vector(hdev, 0);
3514                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3515                         hdev->misc_vector.vector_irq);
3516         }
3517
3518         return ret;
3519 }
3520
3521 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3522 {
3523         free_irq(hdev->misc_vector.vector_irq, hdev);
3524         hclge_free_vector(hdev, 0);
3525 }
3526
3527 int hclge_notify_client(struct hclge_dev *hdev,
3528                         enum hnae3_reset_notify_type type)
3529 {
3530         struct hnae3_handle *handle = &hdev->vport[0].nic;
3531         struct hnae3_client *client = hdev->nic_client;
3532         int ret;
3533
3534         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3535                 return 0;
3536
3537         if (!client->ops->reset_notify)
3538                 return -EOPNOTSUPP;
3539
3540         ret = client->ops->reset_notify(handle, type);
3541         if (ret)
3542                 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3543                         type, ret);
3544
3545         return ret;
3546 }
3547
3548 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3549                                     enum hnae3_reset_notify_type type)
3550 {
3551         struct hnae3_handle *handle = &hdev->vport[0].roce;
3552         struct hnae3_client *client = hdev->roce_client;
3553         int ret;
3554
3555         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3556                 return 0;
3557
3558         if (!client->ops->reset_notify)
3559                 return -EOPNOTSUPP;
3560
3561         ret = client->ops->reset_notify(handle, type);
3562         if (ret)
3563                 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3564                         type, ret);
3565
3566         return ret;
3567 }
3568
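/* Poll the reset status register every HCLGE_RESET_WATI_MS until the
 * reset-in-progress bit clears, giving up after HCLGE_RESET_WAIT_CNT tries.
 */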
3569 static int hclge_reset_wait(struct hclge_dev *hdev)
3570 {
3571 #define HCLGE_RESET_WATI_MS     100
3572 #define HCLGE_RESET_WAIT_CNT    350
3573
3574         u32 val, reg, reg_bit;
3575         u32 cnt = 0;
3576
3577         switch (hdev->reset_type) {
3578         case HNAE3_IMP_RESET:
3579                 reg = HCLGE_GLOBAL_RESET_REG;
3580                 reg_bit = HCLGE_IMP_RESET_BIT;
3581                 break;
3582         case HNAE3_GLOBAL_RESET:
3583                 reg = HCLGE_GLOBAL_RESET_REG;
3584                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3585                 break;
3586         case HNAE3_FUNC_RESET:
3587                 reg = HCLGE_FUN_RST_ING;
3588                 reg_bit = HCLGE_FUN_RST_ING_B;
3589                 break;
3590         default:
3591                 dev_err(&hdev->pdev->dev,
3592                         "Wait for unsupported reset type: %d\n",
3593                         hdev->reset_type);
3594                 return -EINVAL;
3595         }
3596
3597         val = hclge_read_dev(&hdev->hw, reg);
3598         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3599                 msleep(HCLGE_RESET_WATI_MS);
3600                 val = hclge_read_dev(&hdev->hw, reg);
3601                 cnt++;
3602         }
3603
3604         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3605                 dev_warn(&hdev->pdev->dev,
3606                          "Wait for reset timeout: %d\n", hdev->reset_type);
3607                 return -EBUSY;
3608         }
3609
3610         return 0;
3611 }
3612
3613 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3614 {
3615         struct hclge_vf_rst_cmd *req;
3616         struct hclge_desc desc;
3617
3618         req = (struct hclge_vf_rst_cmd *)desc.data;
3619         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3620         req->dest_vfid = func_id;
3621
3622         if (reset)
3623                 req->vf_rst = 0x1;
3624
3625         return hclge_cmd_send(&hdev->hw, &desc, 1);
3626 }
3627
3628 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3629 {
3630         int i;
3631
3632         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3633                 struct hclge_vport *vport = &hdev->vport[i];
3634                 int ret;
3635
3636                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3637                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3638                 if (ret) {
3639                         dev_err(&hdev->pdev->dev,
3640                                 "set vf(%u) rst failed %d!\n",
3641                                 vport->vport_id, ret);
3642                         return ret;
3643                 }
3644
3645                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3646                         continue;
3647
3648                 /* Inform VF to process the reset.
3649                  * hclge_inform_reset_assert_to_vf may fail if VF
3650                  * driver is not loaded.
3651                  */
3652                 ret = hclge_inform_reset_assert_to_vf(vport);
3653                 if (ret)
3654                         dev_warn(&hdev->pdev->dev,
3655                                  "inform reset to vf(%u) failed %d!\n",
3656                                  vport->vport_id, ret);
3657         }
3658
3659         return 0;
3660 }
3661
3662 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3663 {
3664         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3665             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3666             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3667                 return;
3668
3669         hclge_mbx_handler(hdev);
3670
3671         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3672 }
3673
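/* Wait until all VFs report ready for the function reset, servicing mailbox
 * requests between polls; old firmware that does not support the query gets
 * a fixed 100 ms grace period instead.
 */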
3674 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3675 {
3676         struct hclge_pf_rst_sync_cmd *req;
3677         struct hclge_desc desc;
3678         int cnt = 0;
3679         int ret;
3680
3681         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3682         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3683
3684         do {
3685                 /* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3686                 hclge_mailbox_service_task(hdev);
3687
3688                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689                 /* for compatibility with old firmware, wait
3690                  * 100 ms for the VF to stop IO
3691                  */
3692                 if (ret == -EOPNOTSUPP) {
3693                         msleep(HCLGE_RESET_SYNC_TIME);
3694                         return;
3695                 } else if (ret) {
3696                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3697                                  ret);
3698                         return;
3699                 } else if (req->all_vf_ready) {
3700                         return;
3701                 }
3702                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3703                 hclge_cmd_reuse_desc(&desc, true);
3704         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3705
3706         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3707 }
3708
3709 void hclge_report_hw_error(struct hclge_dev *hdev,
3710                            enum hnae3_hw_error_type type)
3711 {
3712         struct hnae3_client *client = hdev->nic_client;
3713
3714         if (!client || !client->ops->process_hw_error ||
3715             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3716                 return;
3717
3718         client->ops->process_hw_error(&hdev->vport[0].nic, type);
3719 }
3720
3721 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3722 {
3723         u32 reg_val;
3724
3725         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3726         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3727                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3728                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3729                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3730         }
3731
3732         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3733                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3734                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3735                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3736         }
3737 }
3738
3739 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3740 {
3741         struct hclge_desc desc;
3742         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3743         int ret;
3744
3745         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3746         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3747         req->fun_reset_vfid = func_id;
3748
3749         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3750         if (ret)
3751                 dev_err(&hdev->pdev->dev,
3752                         "send function reset cmd fail, status =%d\n", ret);
3753
3754         return ret;
3755 }
3756
3757 static void hclge_do_reset(struct hclge_dev *hdev)
3758 {
3759         struct hnae3_handle *handle = &hdev->vport[0].nic;
3760         struct pci_dev *pdev = hdev->pdev;
3761         u32 val;
3762
3763         if (hclge_get_hw_reset_stat(handle)) {
3764                 dev_info(&pdev->dev, "hardware reset not finished\n");
3765                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3766                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3767                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3768                 return;
3769         }
3770
3771         switch (hdev->reset_type) {
3772         case HNAE3_GLOBAL_RESET:
3773                 dev_info(&pdev->dev, "global reset requested\n");
3774                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3775                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3776                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3777                 break;
3778         case HNAE3_FUNC_RESET:
3779                 dev_info(&pdev->dev, "PF reset requested\n");
3780                 /* schedule again to check later */
3781                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3782                 hclge_reset_task_schedule(hdev);
3783                 break;
3784         default:
3785                 dev_warn(&pdev->dev,
3786                          "unsupported reset type: %d\n", hdev->reset_type);
3787                 break;
3788         }
3789 }
3790
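/* Resolve any pending UNKNOWN reset into a concrete type, then return the
 * highest-priority pending reset level and clear it (and any lower levels)
 * from @addr; a level lower than the reset currently in progress is reported
 * as HNAE3_NONE_RESET.
 */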
3791 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3792                                                    unsigned long *addr)
3793 {
3794         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3795         struct hclge_dev *hdev = ae_dev->priv;
3796
3797         /* first, resolve any unknown reset type to the known type(s) */
3798         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3799                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3800                                         HCLGE_MISC_VECTOR_INT_STS);
3801                 /* we intentionally ignore any errors from this function
3802                  * as we will end up in *some* reset request in any case
3803                  */
3804                 if (hclge_handle_hw_msix_error(hdev, addr))
3805                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3806                                  msix_sts_reg);
3807
3808                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3809                 /* We deferred clearing the error event which caused the
3810                  * interrupt since it was not possible to do that in
3811                  * interrupt context (this is why the new UNKNOWN reset
3812                  * type was introduced). Now that the errors have been
3813                  * handled and cleared in hardware, we can safely enable
3814                  * interrupts. This is an exception to the norm.
3815                  */
3816                 hclge_enable_vector(&hdev->misc_vector, true);
3817         }
3818
3819         /* return the highest priority reset level amongst all */
3820         if (test_bit(HNAE3_IMP_RESET, addr)) {
3821                 rst_level = HNAE3_IMP_RESET;
3822                 clear_bit(HNAE3_IMP_RESET, addr);
3823                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3824                 clear_bit(HNAE3_FUNC_RESET, addr);
3825         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3826                 rst_level = HNAE3_GLOBAL_RESET;
3827                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3828                 clear_bit(HNAE3_FUNC_RESET, addr);
3829         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3830                 rst_level = HNAE3_FUNC_RESET;
3831                 clear_bit(HNAE3_FUNC_RESET, addr);
3832         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3833                 rst_level = HNAE3_FLR_RESET;
3834                 clear_bit(HNAE3_FLR_RESET, addr);
3835         }
3836
3837         if (hdev->reset_type != HNAE3_NONE_RESET &&
3838             rst_level < hdev->reset_type)
3839                 return HNAE3_NONE_RESET;
3840
3841         return rst_level;
3842 }
3843
3844 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3845 {
3846         u32 clearval = 0;
3847
3848         switch (hdev->reset_type) {
3849         case HNAE3_IMP_RESET:
3850                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3851                 break;
3852         case HNAE3_GLOBAL_RESET:
3853                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3854                 break;
3855         default:
3856                 break;
3857         }
3858
3859         if (!clearval)
3860                 return;
3861
3862         /* For revision 0x20, the reset interrupt source
3863          * can only be cleared after the hardware reset is done
3864          */
3865         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3866                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3867                                 clearval);
3868
3869         hclge_enable_vector(&hdev->misc_vector, true);
3870 }
3871
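/* Set or clear the software reset-ready bit in the NIC CSQ depth register,
 * the driver-side flag of the reset handshake with hardware.
 */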
3872 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3873 {
3874         u32 reg_val;
3875
3876         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3877         if (enable)
3878                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3879         else
3880                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3881
3882         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3883 }
3884
3885 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3886 {
3887         int ret;
3888
3889         ret = hclge_set_all_vf_rst(hdev, true);
3890         if (ret)
3891                 return ret;
3892
3893         hclge_func_reset_sync_vf(hdev);
3894
3895         return 0;
3896 }
3897
3898 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3899 {
3900         u32 reg_val;
3901         int ret = 0;
3902
3903         switch (hdev->reset_type) {
3904         case HNAE3_FUNC_RESET:
3905                 ret = hclge_func_reset_notify_vf(hdev);
3906                 if (ret)
3907                         return ret;
3908
3909                 ret = hclge_func_reset_cmd(hdev, 0);
3910                 if (ret) {
3911                         dev_err(&hdev->pdev->dev,
3912                                 "asserting function reset fail %d!\n", ret);
3913                         return ret;
3914                 }
3915
3916                 /* After performing PF reset, it is not necessary to do the
3917                  * mailbox handling or send any command to firmware, because
3918                  * any mailbox handling or command to firmware is only valid
3919                  * after hclge_cmd_init is called.
3920                  */
3921                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3922                 hdev->rst_stats.pf_rst_cnt++;
3923                 break;
3924         case HNAE3_FLR_RESET:
3925                 ret = hclge_func_reset_notify_vf(hdev);
3926                 if (ret)
3927                         return ret;
3928                 break;
3929         case HNAE3_IMP_RESET:
3930                 hclge_handle_imp_error(hdev);
3931                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3932                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3933                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3934                 break;
3935         default:
3936                 break;
3937         }
3938
3939         /* inform hardware that preparatory work is done */
3940         msleep(HCLGE_RESET_SYNC_TIME);
3941         hclge_reset_handshake(hdev, true);
3942         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3943
3944         return ret;
3945 }
3946
3947 static void hclge_show_rst_info(struct hclge_dev *hdev)
3948 {
3949         char *buf;
3950
3951         buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3952         if (!buf)
3953                 return;
3954
3955         hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3956
3957         dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3958
3959         kfree(buf);
3960 }
3961
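/* Decide whether a failed reset should be rescheduled; after too many
 * failures the reset is abandoned and HCLGE_STATE_RST_FAIL is set.
 */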
3962 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3963 {
3964 #define MAX_RESET_FAIL_CNT 5
3965
3966         if (hdev->reset_pending) {
3967                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3968                          hdev->reset_pending);
3969                 return true;
3970         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3971                    HCLGE_RESET_INT_M) {
3972                 dev_info(&hdev->pdev->dev,
3973                          "reset failed because of a new reset interrupt\n");
3974                 hclge_clear_reset_cause(hdev);
3975                 return false;
3976         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3977                 hdev->rst_stats.reset_fail_cnt++;
3978                 set_bit(hdev->reset_type, &hdev->reset_pending);
3979                 dev_info(&hdev->pdev->dev,
3980                          "re-schedule reset task(%u)\n",
3981                          hdev->rst_stats.reset_fail_cnt);
3982                 return true;
3983         }
3984
3985         hclge_clear_reset_cause(hdev);
3986
3987         /* recover the handshake status when reset fail */
3988         hclge_reset_handshake(hdev, true);
3989
3990         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3991
3992         hclge_show_rst_info(hdev);
3993
3994         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3995
3996         return false;
3997 }
3998
3999 static void hclge_update_reset_level(struct hclge_dev *hdev)
4000 {
4001         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4002         enum hnae3_reset_type reset_level;
4003
4004         /* the reset request will not be set during reset, so clear any
4005          * pending reset request to avoid an unnecessary reset
4006          * caused by the same reason.
4007          */
4008         hclge_get_reset_level(ae_dev, &hdev->reset_request);
4009
4010         /* if default_reset_request has a higher level reset request,
4011          * it should be handled as soon as possible, since some errors
4012          * need this kind of reset to be fixed.
4013          */
4014         reset_level = hclge_get_reset_level(ae_dev,
4015                                             &hdev->default_reset_request);
4016         if (reset_level != HNAE3_NONE_RESET)
4017                 set_bit(reset_level, &hdev->reset_request);
4018 }
4019
4020 static int hclge_set_rst_done(struct hclge_dev *hdev)
4021 {
4022         struct hclge_pf_rst_done_cmd *req;
4023         struct hclge_desc desc;
4024         int ret;
4025
4026         req = (struct hclge_pf_rst_done_cmd *)desc.data;
4027         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4028         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4029
4030         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4031         /* To be compatible with the old firmware, which does not support
4032          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4033          * return success
4034          */
4035         if (ret == -EOPNOTSUPP) {
4036                 dev_warn(&hdev->pdev->dev,
4037                          "current firmware does not support command(0x%x)!\n",
4038                          HCLGE_OPC_PF_RST_DONE);
4039                 return 0;
4040         } else if (ret) {
4041                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4042                         ret);
4043         }
4044
4045         return ret;
4046 }
4047
4048 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4049 {
4050         int ret = 0;
4051
4052         switch (hdev->reset_type) {
4053         case HNAE3_FUNC_RESET:
4054         case HNAE3_FLR_RESET:
4055                 ret = hclge_set_all_vf_rst(hdev, false);
4056                 break;
4057         case HNAE3_GLOBAL_RESET:
4058         case HNAE3_IMP_RESET:
4059                 ret = hclge_set_rst_done(hdev);
4060                 break;
4061         default:
4062                 break;
4063         }
4064
4065         /* clear up the handshake status after re-initialize done */
4066         hclge_reset_handshake(hdev, false);
4067
4068         return ret;
4069 }
4070
4071 static int hclge_reset_stack(struct hclge_dev *hdev)
4072 {
4073         int ret;
4074
4075         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4076         if (ret)
4077                 return ret;
4078
4079         ret = hclge_reset_ae_dev(hdev->ae_dev);
4080         if (ret)
4081                 return ret;
4082
4083         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4084 }
4085
4086 static int hclge_reset_prepare(struct hclge_dev *hdev)
4087 {
4088         int ret;
4089
4090         hdev->rst_stats.reset_cnt++;
4091         /* perform reset of the stack & ae device for a client */
4092         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4093         if (ret)
4094                 return ret;
4095
4096         rtnl_lock();
4097         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4098         rtnl_unlock();
4099         if (ret)
4100                 return ret;
4101
4102         return hclge_reset_prepare_wait(hdev);
4103 }
4104
4105 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4106 {
4107         int ret;
4108
4109         hdev->rst_stats.hw_reset_done_cnt++;
4110
4111         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4112         if (ret)
4113                 return ret;
4114
4115         rtnl_lock();
4116         ret = hclge_reset_stack(hdev);
4117         rtnl_unlock();
4118         if (ret)
4119                 return ret;
4120
4121         hclge_clear_reset_cause(hdev);
4122
4123         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4124         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4125          * times
4126          */
4127         if (ret &&
4128             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4129                 return ret;
4130
4131         ret = hclge_reset_prepare_up(hdev);
4132         if (ret)
4133                 return ret;
4134
4135         rtnl_lock();
4136         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4137         rtnl_unlock();
4138         if (ret)
4139                 return ret;
4140
4141         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4142         if (ret)
4143                 return ret;
4144
4145         hdev->last_reset_time = jiffies;
4146         hdev->rst_stats.reset_fail_cnt = 0;
4147         hdev->rst_stats.reset_done_cnt++;
4148         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4149
4150         hclge_update_reset_level(hdev);
4151
4152         return 0;
4153 }
4154
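/* Top-level reset flow: prepare (notify clients down and quiesce the
 * device), wait for the hardware reset to complete, then rebuild
 * (re-initialize the ae device and bring clients back up). Any failure
 * goes through the common error handler, which may reschedule the reset
 * task for another attempt.
 */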
4155 static void hclge_reset(struct hclge_dev *hdev)
4156 {
4157         if (hclge_reset_prepare(hdev))
4158                 goto err_reset;
4159
4160         if (hclge_reset_wait(hdev))
4161                 goto err_reset;
4162
4163         if (hclge_reset_rebuild(hdev))
4164                 goto err_reset;
4165
4166         return;
4167
4168 err_reset:
4169         if (hclge_reset_err_handle(hdev))
4170                 hclge_reset_task_schedule(hdev);
4171 }
4172
4173 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4174 {
4175         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4176         struct hclge_dev *hdev = ae_dev->priv;
4177
4178         /* We might end up getting called for one of the two cases below:
4179          * 1. A recoverable error was conveyed through APEI and the only way to
4180          *    restore normalcy is to reset.
4181          * 2. A new reset request from the stack due to a timeout.
4182          *
4183          * Check if this is a new reset request and we are not here just because
4184          * the last reset attempt did not succeed and the watchdog hit us again.
4185          * We know this if the last reset request did not occur very recently
4186          * (watchdog timer = 5*HZ, so let us check after a sufficiently large
4187          * time, say 4*5*HZ). In case of a new request we reset the "reset level"
4188          * to PF reset. And if it is a repeat of the most recent reset request,
4189          * we want to make sure we throttle it, so we will not allow it again
4190          * before 3*HZ has passed.
4191          */
4192
4193         if (time_before(jiffies, (hdev->last_reset_time +
4194                                   HCLGE_RESET_INTERVAL))) {
4195                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4196                 return;
4197         }
4198
4199         if (hdev->default_reset_request) {
4200                 hdev->reset_level =
4201                         hclge_get_reset_level(ae_dev,
4202                                               &hdev->default_reset_request);
4203         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4204                 hdev->reset_level = HNAE3_FUNC_RESET;
4205         }
4206
4207         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4208                  hdev->reset_level);
4209
4210         /* request reset & schedule reset task */
4211         set_bit(hdev->reset_level, &hdev->reset_request);
4212         hclge_reset_task_schedule(hdev);
4213
4214         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4215                 hdev->reset_level++;
4216 }
4217
4218 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4219                                         enum hnae3_reset_type rst_type)
4220 {
4221         struct hclge_dev *hdev = ae_dev->priv;
4222
4223         set_bit(rst_type, &hdev->default_reset_request);
4224 }
4225
4226 static void hclge_reset_timer(struct timer_list *t)
4227 {
4228         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4229
4230         /* if default_reset_request has no value, it means that this reset
4231          * request has already been handled, so just return here
4232          */
4233         if (!hdev->default_reset_request)
4234                 return;
4235
4236         dev_info(&hdev->pdev->dev,
4237                  "triggering reset in reset timer\n");
4238         hclge_reset_event(hdev->pdev, NULL);
4239 }
4240
4241 static void hclge_reset_subtask(struct hclge_dev *hdev)
4242 {
4243         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4244
4245         /* check if there is any ongoing reset in the hardware. This status can
4246          * be checked from reset_pending. If there is, then we need to wait for
4247          * the hardware to complete the reset.
4248          *    a. If we are able to figure out in reasonable time that the hardware
4249          *       has fully reset, then we can proceed with the driver and client
4250          *       reset.
4251          *    b. else, we can come back later to check this status, so reschedule
4252          *       now.
4253          */
4254         hdev->last_reset_time = jiffies;
4255         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4256         if (hdev->reset_type != HNAE3_NONE_RESET)
4257                 hclge_reset(hdev);
4258
4259         /* check if we got any *new* reset requests to be honored */
4260         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4261         if (hdev->reset_type != HNAE3_NONE_RESET)
4262                 hclge_do_reset(hdev);
4263
4264         hdev->reset_type = HNAE3_NONE_RESET;
4265 }
4266
4267 static void hclge_reset_service_task(struct hclge_dev *hdev)
4268 {
4269         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4270                 return;
4271
4272         down(&hdev->reset_sem);
4273         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4274
4275         hclge_reset_subtask(hdev);
4276
4277         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4278         up(&hdev->reset_sem);
4279 }
4280
4281 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4282 {
4283         int i;
4284
4285         /* start from vport 1, since vport 0 (the PF) is always alive */
4286         for (i = 1; i < hdev->num_alloc_vport; i++) {
4287                 struct hclge_vport *vport = &hdev->vport[i];
4288
4289                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4290                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4291
4292                 /* If vf is not alive, set to default value */
4293                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4294                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4295         }
4296 }
4297
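/* Periodic housekeeping, nominally once per second. Link status, MAC
 * table, promisc mode and FD table are synced on every pass; the heavier
 * work (stats, port info, VLAN sync, ARFS expiry) is skipped and
 * rescheduled for the remaining time when less than a second has elapsed
 * since the last full pass, e.g. when the task was woken early by a
 * mailbox event.
 */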
4298 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4299 {
4300         unsigned long delta = round_jiffies_relative(HZ);
4301
4302         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4303                 return;
4304
4305         /* Always handle the link updating to make sure link state is
4306          * updated when it is triggered by mbx.
4307          */
4308         hclge_update_link_status(hdev);
4309         hclge_sync_mac_table(hdev);
4310         hclge_sync_promisc_mode(hdev);
4311         hclge_sync_fd_table(hdev);
4312
4313         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4314                 delta = jiffies - hdev->last_serv_processed;
4315
4316                 if (delta < round_jiffies_relative(HZ)) {
4317                         delta = round_jiffies_relative(HZ) - delta;
4318                         goto out;
4319                 }
4320         }
4321
4322         hdev->serv_processed_cnt++;
4323         hclge_update_vport_alive(hdev);
4324
4325         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4326                 hdev->last_serv_processed = jiffies;
4327                 goto out;
4328         }
4329
4330         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4331                 hclge_update_stats_for_all(hdev);
4332
4333         hclge_update_port_info(hdev);
4334         hclge_sync_vlan_filter(hdev);
4335
4336         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4337                 hclge_rfs_filter_expire(hdev);
4338
4339         hdev->last_serv_processed = jiffies;
4340
4341 out:
4342         hclge_task_schedule(hdev, delta);
4343 }
4344
4345 static void hclge_service_task(struct work_struct *work)
4346 {
4347         struct hclge_dev *hdev =
4348                 container_of(work, struct hclge_dev, service_task.work);
4349
4350         hclge_reset_service_task(hdev);
4351         hclge_mailbox_service_task(hdev);
4352         hclge_periodic_service_task(hdev);
4353
4354         /* Handle reset and mbx again in case periodical task delays the
4355          * handling by calling hclge_task_schedule() in
4356          * hclge_periodic_service_task().
4357          */
4358         hclge_reset_service_task(hdev);
4359         hclge_mailbox_service_task(hdev);
4360 }
4361
4362 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4363 {
4364         /* VF handle has no client */
4365         if (!handle->client)
4366                 return container_of(handle, struct hclge_vport, nic);
4367         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4368                 return container_of(handle, struct hclge_vport, roce);
4369         else
4370                 return container_of(handle, struct hclge_vport, nic);
4371 }
4372
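/* Fill in the Linux IRQ number (via pci_irq_vector()) and the I/O address
 * of the interrupt control register for a vector. The first 64 vectors sit
 * in a contiguous region at HCLGE_VECTOR_REG_BASE; later vectors use the
 * extended region at HCLGE_VECTOR_EXT_REG_BASE, addressed by a high offset
 * per group of 64 plus a low offset within the group. Illustrative example
 * (register layout assumed, not taken from the datasheet): for idx = 66,
 * (idx - 1) = 65, so io_addr = io_base + HCLGE_VECTOR_EXT_REG_BASE +
 * 1 * HCLGE_VECTOR_REG_OFFSET_H + 1 * HCLGE_VECTOR_REG_OFFSET.
 */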
4373 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4374                                   struct hnae3_vector_info *vector_info)
4375 {
4376 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4377
4378         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4379
4380         /* need an extended offset to configure vectors >= 64 */
4381         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4382                 vector_info->io_addr = hdev->hw.io_base +
4383                                 HCLGE_VECTOR_REG_BASE +
4384                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4385         else
4386                 vector_info->io_addr = hdev->hw.io_base +
4387                                 HCLGE_VECTOR_EXT_REG_BASE +
4388                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4389                                 HCLGE_VECTOR_REG_OFFSET_H +
4390                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4391                                 HCLGE_VECTOR_REG_OFFSET;
4392
4393         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4394         hdev->vector_irq[idx] = vector_info->vector;
4395 }
4396
4397 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4398                             struct hnae3_vector_info *vector_info)
4399 {
4400         struct hclge_vport *vport = hclge_get_vport(handle);
4401         struct hnae3_vector_info *vector = vector_info;
4402         struct hclge_dev *hdev = vport->back;
4403         int alloc = 0;
4404         u16 i = 0;
4405         u16 j;
4406
4407         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4408         vector_num = min(hdev->num_msi_left, vector_num);
4409
4410         for (j = 0; j < vector_num; j++) {
4411                 while (++i < hdev->num_nic_msi) {
4412                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4413                                 hclge_get_vector_info(hdev, i, vector);
4414                                 vector++;
4415                                 alloc++;
4416
4417                                 break;
4418                         }
4419                 }
4420         }
4421         hdev->num_msi_left -= alloc;
4422         hdev->num_msi_used += alloc;
4423
4424         return alloc;
4425 }
4426
4427 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4428 {
4429         int i;
4430
4431         for (i = 0; i < hdev->num_msi; i++)
4432                 if (vector == hdev->vector_irq[i])
4433                         return i;
4434
4435         return -EINVAL;
4436 }
4437
4438 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4439 {
4440         struct hclge_vport *vport = hclge_get_vport(handle);
4441         struct hclge_dev *hdev = vport->back;
4442         int vector_id;
4443
4444         vector_id = hclge_get_vector_index(hdev, vector);
4445         if (vector_id < 0) {
4446                 dev_err(&hdev->pdev->dev,
4447                         "Get vector index fail. vector = %d\n", vector);
4448                 return vector_id;
4449         }
4450
4451         hclge_free_vector(hdev, vector_id);
4452
4453         return 0;
4454 }
4455
4456 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4457 {
4458         return HCLGE_RSS_KEY_SIZE;
4459 }
4460
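/* Program the RSS hash algorithm and hash key. The key (HCLGE_RSS_KEY_SIZE
 * bytes) is too large for one command descriptor, so it is written in
 * segments of up to HCLGE_RSS_HASH_KEY_NUM bytes; key_offset selects the
 * segment and is encoded into hash_config so the firmware knows which part
 * of the key each command carries.
 */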
4461 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4462                                   const u8 hfunc, const u8 *key)
4463 {
4464         struct hclge_rss_config_cmd *req;
4465         unsigned int key_offset = 0;
4466         struct hclge_desc desc;
4467         int key_counts;
4468         int key_size;
4469         int ret;
4470
4471         key_counts = HCLGE_RSS_KEY_SIZE;
4472         req = (struct hclge_rss_config_cmd *)desc.data;
4473
4474         while (key_counts) {
4475                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4476                                            false);
4477
4478                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4479                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4480
4481                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4482                 memcpy(req->hash_key,
4483                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4484
4485                 key_counts -= key_size;
4486                 key_offset++;
4487                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4488                 if (ret) {
4489                         dev_err(&hdev->pdev->dev,
4490                                 "Configure RSS config fail, status = %d\n",
4491                                 ret);
4492                         return ret;
4493                 }
4494         }
4495         return 0;
4496 }
4497
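/* Write the RSS indirection table to hardware in chunks of
 * HCLGE_RSS_CFG_TBL_SIZE entries per command. Wide queue ids are split:
 * the low byte goes into rss_qid_l[], and the bit above
 * HCLGE_RSS_CFG_TBL_BW_L is packed into the rss_qid_h[] bitmap, one bit
 * per table entry.
 */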
4498 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4499 {
4500         struct hclge_rss_indirection_table_cmd *req;
4501         struct hclge_desc desc;
4502         int rss_cfg_tbl_num;
4503         u8 rss_msb_oft;
4504         u8 rss_msb_val;
4505         int ret;
4506         u16 qid;
4507         int i;
4508         u32 j;
4509
4510         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4511         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4512                           HCLGE_RSS_CFG_TBL_SIZE;
4513
4514         for (i = 0; i < rss_cfg_tbl_num; i++) {
4515                 hclge_cmd_setup_basic_desc
4516                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4517
4518                 req->start_table_index =
4519                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4520                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4521                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4522                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4523                         req->rss_qid_l[j] = qid & 0xff;
4524                         rss_msb_oft =
4525                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4526                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4527                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4528                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4529                 }
4530                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4531                 if (ret) {
4532                         dev_err(&hdev->pdev->dev,
4533                                 "Configure rss indir table fail,status = %d\n",
4534                                 ret);
4535                         return ret;
4536                 }
4537         }
4538         return 0;
4539 }
4540
4541 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4542                                  u16 *tc_size, u16 *tc_offset)
4543 {
4544         struct hclge_rss_tc_mode_cmd *req;
4545         struct hclge_desc desc;
4546         int ret;
4547         int i;
4548
4549         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4550         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4551
4552         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4553                 u16 mode = 0;
4554
4555                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4556                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4557                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4558                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4559                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4560                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4561                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4562
4563                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4564         }
4565
4566         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4567         if (ret)
4568                 dev_err(&hdev->pdev->dev,
4569                         "Configure rss tc mode fail, status = %d\n", ret);
4570
4571         return ret;
4572 }
4573
4574 static void hclge_get_rss_type(struct hclge_vport *vport)
4575 {
4576         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4577             vport->rss_tuple_sets.ipv4_udp_en ||
4578             vport->rss_tuple_sets.ipv4_sctp_en ||
4579             vport->rss_tuple_sets.ipv6_tcp_en ||
4580             vport->rss_tuple_sets.ipv6_udp_en ||
4581             vport->rss_tuple_sets.ipv6_sctp_en)
4582                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4583         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4584                  vport->rss_tuple_sets.ipv6_fragment_en)
4585                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4586         else
4587                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4588 }
4589
4590 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4591 {
4592         struct hclge_rss_input_tuple_cmd *req;
4593         struct hclge_desc desc;
4594         int ret;
4595
4596         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4597
4598         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4599
4600         /* Get the tuple cfg from pf */
4601         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4602         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4603         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4604         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4605         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4606         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4607         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4608         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4609         hclge_get_rss_type(&hdev->vport[0]);
4610         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4611         if (ret)
4612                 dev_err(&hdev->pdev->dev,
4613                         "Configure rss input fail, status = %d\n", ret);
4614         return ret;
4615 }
4616
4617 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4618                          u8 *key, u8 *hfunc)
4619 {
4620         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4621         struct hclge_vport *vport = hclge_get_vport(handle);
4622         int i;
4623
4624         /* Get hash algorithm */
4625         if (hfunc) {
4626                 switch (vport->rss_algo) {
4627                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4628                         *hfunc = ETH_RSS_HASH_TOP;
4629                         break;
4630                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4631                         *hfunc = ETH_RSS_HASH_XOR;
4632                         break;
4633                 default:
4634                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4635                         break;
4636                 }
4637         }
4638
4639         /* Get the RSS Key required by the user */
4640         if (key)
4641                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4642
4643         /* Get the indirection table */
4644         if (indir)
4645                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4646                         indir[i] = vport->rss_indirection_tbl[i];
4647
4648         return 0;
4649 }
4650
4651 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4652                          const  u8 *key, const  u8 hfunc)
4653 {
4654         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4655         struct hclge_vport *vport = hclge_get_vport(handle);
4656         struct hclge_dev *hdev = vport->back;
4657         u8 hash_algo;
4658         int ret, i;
4659
4660         /* Set the RSS Hash Key if specified by the user */
4661         if (key) {
4662                 switch (hfunc) {
4663                 case ETH_RSS_HASH_TOP:
4664                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4665                         break;
4666                 case ETH_RSS_HASH_XOR:
4667                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4668                         break;
4669                 case ETH_RSS_HASH_NO_CHANGE:
4670                         hash_algo = vport->rss_algo;
4671                         break;
4672                 default:
4673                         return -EINVAL;
4674                 }
4675
4676                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4677                 if (ret)
4678                         return ret;
4679
4680                 /* Update the shadow RSS key with the user specified key */
4681                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4682                 vport->rss_algo = hash_algo;
4683         }
4684
4685         /* Update the shadow RSS table with user specified qids */
4686         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4687                 vport->rss_indirection_tbl[i] = indir[i];
4688
4689         /* Update the hardware */
4690         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4691 }
4692
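/* Translate ethtool RXH_* flags into the hardware hash tuple bits. For
 * example, RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 maps to
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT,
 * i.e. hashing on the full 4-tuple. SCTP flows additionally set
 * HCLGE_V_TAG_BIT (presumably hashing on the SCTP verification tag).
 */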
4693 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4694 {
4695         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4696
4697         if (nfc->data & RXH_L4_B_2_3)
4698                 hash_sets |= HCLGE_D_PORT_BIT;
4699         else
4700                 hash_sets &= ~HCLGE_D_PORT_BIT;
4701
4702         if (nfc->data & RXH_IP_SRC)
4703                 hash_sets |= HCLGE_S_IP_BIT;
4704         else
4705                 hash_sets &= ~HCLGE_S_IP_BIT;
4706
4707         if (nfc->data & RXH_IP_DST)
4708                 hash_sets |= HCLGE_D_IP_BIT;
4709         else
4710                 hash_sets &= ~HCLGE_D_IP_BIT;
4711
4712         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4713                 hash_sets |= HCLGE_V_TAG_BIT;
4714
4715         return hash_sets;
4716 }
4717
4718 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4719                                     struct ethtool_rxnfc *nfc,
4720                                     struct hclge_rss_input_tuple_cmd *req)
4721 {
4722         struct hclge_dev *hdev = vport->back;
4723         u8 tuple_sets;
4724
4725         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4726         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4727         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4728         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4729         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4730         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4731         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4732         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4733
4734         tuple_sets = hclge_get_rss_hash_bits(nfc);
4735         switch (nfc->flow_type) {
4736         case TCP_V4_FLOW:
4737                 req->ipv4_tcp_en = tuple_sets;
4738                 break;
4739         case TCP_V6_FLOW:
4740                 req->ipv6_tcp_en = tuple_sets;
4741                 break;
4742         case UDP_V4_FLOW:
4743                 req->ipv4_udp_en = tuple_sets;
4744                 break;
4745         case UDP_V6_FLOW:
4746                 req->ipv6_udp_en = tuple_sets;
4747                 break;
4748         case SCTP_V4_FLOW:
4749                 req->ipv4_sctp_en = tuple_sets;
4750                 break;
4751         case SCTP_V6_FLOW:
4752                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4753                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4754                         return -EINVAL;
4755
4756                 req->ipv6_sctp_en = tuple_sets;
4757                 break;
4758         case IPV4_FLOW:
4759                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4760                 break;
4761         case IPV6_FLOW:
4762                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4763                 break;
4764         default:
4765                 return -EINVAL;
4766         }
4767
4768         return 0;
4769 }
4770
4771 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4772                                struct ethtool_rxnfc *nfc)
4773 {
4774         struct hclge_vport *vport = hclge_get_vport(handle);
4775         struct hclge_dev *hdev = vport->back;
4776         struct hclge_rss_input_tuple_cmd *req;
4777         struct hclge_desc desc;
4778         int ret;
4779
4780         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4781                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4782                 return -EINVAL;
4783
4784         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4785         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4786
4787         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4788         if (ret) {
4789                 dev_err(&hdev->pdev->dev,
4790                         "failed to init rss tuple cmd, ret = %d\n", ret);
4791                 return ret;
4792         }
4793
4794         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4795         if (ret) {
4796                 dev_err(&hdev->pdev->dev,
4797                         "Set rss tuple fail, status = %d\n", ret);
4798                 return ret;
4799         }
4800
4801         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4802         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4803         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4804         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4805         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4806         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4807         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4808         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4809         hclge_get_rss_type(vport);
4810         return 0;
4811 }
4812
4813 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4814                                      u8 *tuple_sets)
4815 {
4816         switch (flow_type) {
4817         case TCP_V4_FLOW:
4818                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4819                 break;
4820         case UDP_V4_FLOW:
4821                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4822                 break;
4823         case TCP_V6_FLOW:
4824                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4825                 break;
4826         case UDP_V6_FLOW:
4827                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4828                 break;
4829         case SCTP_V4_FLOW:
4830                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4831                 break;
4832         case SCTP_V6_FLOW:
4833                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4834                 break;
4835         case IPV4_FLOW:
4836         case IPV6_FLOW:
4837                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4838                 break;
4839         default:
4840                 return -EINVAL;
4841         }
4842
4843         return 0;
4844 }
4845
4846 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4847 {
4848         u64 tuple_data = 0;
4849
4850         if (tuple_sets & HCLGE_D_PORT_BIT)
4851                 tuple_data |= RXH_L4_B_2_3;
4852         if (tuple_sets & HCLGE_S_PORT_BIT)
4853                 tuple_data |= RXH_L4_B_0_1;
4854         if (tuple_sets & HCLGE_D_IP_BIT)
4855                 tuple_data |= RXH_IP_DST;
4856         if (tuple_sets & HCLGE_S_IP_BIT)
4857                 tuple_data |= RXH_IP_SRC;
4858
4859         return tuple_data;
4860 }
4861
4862 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4863                                struct ethtool_rxnfc *nfc)
4864 {
4865         struct hclge_vport *vport = hclge_get_vport(handle);
4866         u8 tuple_sets;
4867         int ret;
4868
4869         nfc->data = 0;
4870
4871         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4872         if (ret || !tuple_sets)
4873                 return ret;
4874
4875         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4876
4877         return 0;
4878 }
4879
4880 static int hclge_get_tc_size(struct hnae3_handle *handle)
4881 {
4882         struct hclge_vport *vport = hclge_get_vport(handle);
4883         struct hclge_dev *hdev = vport->back;
4884
4885         return hdev->pf_rss_size_max;
4886 }
4887
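/* Program per-TC RSS parameters. tc_size is encoded as the log2 of the
 * rounded-up power-of-two of the TC's queue count, e.g. a TC with 10
 * queues is rounded up to 16 and encoded as tc_size = 4; tc_offset is the
 * first queue of that TC. TCs absent from hw_tc_map are left invalid.
 */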
4888 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4889 {
4890         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4891         struct hclge_vport *vport = hdev->vport;
4892         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4893         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4894         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4895         struct hnae3_tc_info *tc_info;
4896         u16 roundup_size;
4897         u16 rss_size;
4898         int i;
4899
4900         tc_info = &vport->nic.kinfo.tc_info;
4901         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4902                 rss_size = tc_info->tqp_count[i];
4903                 tc_valid[i] = 0;
4904
4905                 if (!(hdev->hw_tc_map & BIT(i)))
4906                         continue;
4907
4908                 /* tc_size set to hardware is the log2 of the roundup power of
4909                  * two of rss_size; the actual queue size is limited by the
4910                  * indirection table.
4911                  */
4912                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4913                     rss_size == 0) {
4914                         dev_err(&hdev->pdev->dev,
4915                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4916                                 rss_size);
4917                         return -EINVAL;
4918                 }
4919
4920                 roundup_size = roundup_pow_of_two(rss_size);
4921                 roundup_size = ilog2(roundup_size);
4922
4923                 tc_valid[i] = 1;
4924                 tc_size[i] = roundup_size;
4925                 tc_offset[i] = tc_info->tqp_offset[i];
4926         }
4927
4928         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4929 }
4930
4931 int hclge_rss_init_hw(struct hclge_dev *hdev)
4932 {
4933         struct hclge_vport *vport = hdev->vport;
4934         u16 *rss_indir = vport[0].rss_indirection_tbl;
4935         u8 *key = vport[0].rss_hash_key;
4936         u8 hfunc = vport[0].rss_algo;
4937         int ret;
4938
4939         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4940         if (ret)
4941                 return ret;
4942
4943         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4944         if (ret)
4945                 return ret;
4946
4947         ret = hclge_set_rss_input_tuple(hdev);
4948         if (ret)
4949                 return ret;
4950
4951         return hclge_init_rss_tc_mode(hdev);
4952 }
4953
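/* Initialize the default RSS indirection table by spreading the entries
 * round-robin over the allocated RSS queues. With alloc_rss_size = 8, for
 * example, the table repeats the pattern 0, 1, ..., 7 across all
 * rss_ind_tbl_size entries.
 */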
4954 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4955 {
4956         struct hclge_vport *vport = &hdev->vport[0];
4957         int i;
4958
4959         for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4960                 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4961 }
4962
4963 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4964 {
4965         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4966         int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4967         struct hclge_vport *vport = &hdev->vport[0];
4968         u16 *rss_ind_tbl;
4969
4970         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4971                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4972
4973         vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4974         vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4975         vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4976         vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4977         vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4978         vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4979         vport->rss_tuple_sets.ipv6_sctp_en =
4980                 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4981                 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4982                 HCLGE_RSS_INPUT_TUPLE_SCTP;
4983         vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4984
4985         vport->rss_algo = rss_algo;
4986
4987         rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4988                                    sizeof(*rss_ind_tbl), GFP_KERNEL);
4989         if (!rss_ind_tbl)
4990                 return -ENOMEM;
4991
4992         vport->rss_indirection_tbl = rss_ind_tbl;
4993         memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4994
4995         hclge_rss_indir_init_cfg(hdev);
4996
4997         return 0;
4998 }
4999
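/* Bind (en == true) or unbind (en == false) a chain of TQP rings to an
 * interrupt vector. Each command descriptor carries at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a long ring chain is sent
 * as several ADD/DEL_RING_TO_VECTOR commands, with the vector id re-encoded
 * into each new descriptor.
 */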
5000 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5001                                 int vector_id, bool en,
5002                                 struct hnae3_ring_chain_node *ring_chain)
5003 {
5004         struct hclge_dev *hdev = vport->back;
5005         struct hnae3_ring_chain_node *node;
5006         struct hclge_desc desc;
5007         struct hclge_ctrl_vector_chain_cmd *req =
5008                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5009         enum hclge_cmd_status status;
5010         enum hclge_opcode_type op;
5011         u16 tqp_type_and_id;
5012         int i;
5013
5014         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5015         hclge_cmd_setup_basic_desc(&desc, op, false);
5016         req->int_vector_id_l = hnae3_get_field(vector_id,
5017                                                HCLGE_VECTOR_ID_L_M,
5018                                                HCLGE_VECTOR_ID_L_S);
5019         req->int_vector_id_h = hnae3_get_field(vector_id,
5020                                                HCLGE_VECTOR_ID_H_M,
5021                                                HCLGE_VECTOR_ID_H_S);
5022
5023         i = 0;
5024         for (node = ring_chain; node; node = node->next) {
5025                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5026                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5027                                 HCLGE_INT_TYPE_S,
5028                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5029                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5030                                 HCLGE_TQP_ID_S, node->tqp_index);
5031                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5032                                 HCLGE_INT_GL_IDX_S,
5033                                 hnae3_get_field(node->int_gl_idx,
5034                                                 HNAE3_RING_GL_IDX_M,
5035                                                 HNAE3_RING_GL_IDX_S));
5036                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5037                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5038                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5039                         req->vfid = vport->vport_id;
5040
5041                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5042                         if (status) {
5043                                 dev_err(&hdev->pdev->dev,
5044                                         "Map TQP fail, status is %d.\n",
5045                                         status);
5046                                 return -EIO;
5047                         }
5048                         i = 0;
5049
5050                         hclge_cmd_setup_basic_desc(&desc,
5051                                                    op,
5052                                                    false);
5053                         req->int_vector_id_l =
5054                                 hnae3_get_field(vector_id,
5055                                                 HCLGE_VECTOR_ID_L_M,
5056                                                 HCLGE_VECTOR_ID_L_S);
5057                         req->int_vector_id_h =
5058                                 hnae3_get_field(vector_id,
5059                                                 HCLGE_VECTOR_ID_H_M,
5060                                                 HCLGE_VECTOR_ID_H_S);
5061                 }
5062         }
5063
5064         if (i > 0) {
5065                 req->int_cause_num = i;
5066                 req->vfid = vport->vport_id;
5067                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5068                 if (status) {
5069                         dev_err(&hdev->pdev->dev,
5070                                 "Map TQP fail, status is %d.\n", status);
5071                         return -EIO;
5072                 }
5073         }
5074
5075         return 0;
5076 }
5077
5078 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5079                                     struct hnae3_ring_chain_node *ring_chain)
5080 {
5081         struct hclge_vport *vport = hclge_get_vport(handle);
5082         struct hclge_dev *hdev = vport->back;
5083         int vector_id;
5084
5085         vector_id = hclge_get_vector_index(hdev, vector);
5086         if (vector_id < 0) {
5087                 dev_err(&hdev->pdev->dev,
5088                         "failed to get vector index. vector=%d\n", vector);
5089                 return vector_id;
5090         }
5091
5092         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5093 }
5094
5095 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5096                                        struct hnae3_ring_chain_node *ring_chain)
5097 {
5098         struct hclge_vport *vport = hclge_get_vport(handle);
5099         struct hclge_dev *hdev = vport->back;
5100         int vector_id, ret;
5101
5102         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5103                 return 0;
5104
5105         vector_id = hclge_get_vector_index(hdev, vector);
5106         if (vector_id < 0) {
5107                 dev_err(&handle->pdev->dev,
5108                         "Get vector index fail. ret =%d\n", vector_id);
5109                 return vector_id;
5110         }
5111
5112         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5113         if (ret)
5114                 dev_err(&handle->pdev->dev,
5115                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5116                         vector_id, ret);
5117
5118         return ret;
5119 }
5120
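/* Configure promiscuous mode for one function. The extend_promisc field
 * carries separate RX/TX enables for unicast, multicast and broadcast,
 * while the legacy promisc field is still filled in to stay compatible
 * with DEVICE_VERSION_V1/2. When the HNAE3_PFLAG_LIMIT_PROMISC private
 * flag is set, unicast TX promisc is kept disabled regardless of the
 * requested setting.
 */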
5121 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5122                                       bool en_uc, bool en_mc, bool en_bc)
5123 {
5124         struct hclge_vport *vport = &hdev->vport[vf_id];
5125         struct hnae3_handle *handle = &vport->nic;
5126         struct hclge_promisc_cfg_cmd *req;
5127         struct hclge_desc desc;
5128         bool uc_tx_en = en_uc;
5129         u8 promisc_cfg = 0;
5130         int ret;
5131
5132         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5133
5134         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5135         req->vf_id = vf_id;
5136
5137         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5138                 uc_tx_en = false;
5139
5140         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5141         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5142         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5143         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5144         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5145         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5146         req->extend_promisc = promisc_cfg;
5147
5148         /* to be compatible with DEVICE_VERSION_V1/2 */
5149         promisc_cfg = 0;
5150         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5151         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5152         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5153         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5154         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5155         req->promisc = promisc_cfg;
5156
5157         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5158         if (ret)
5159                 dev_err(&hdev->pdev->dev,
5160                         "failed to set vport %u promisc mode, ret = %d.\n",
5161                         vf_id, ret);
5162
5163         return ret;
5164 }
5165
5166 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5167                                  bool en_mc_pmc, bool en_bc_pmc)
5168 {
5169         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5170                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5171 }
5172
5173 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5174                                   bool en_mc_pmc)
5175 {
5176         struct hclge_vport *vport = hclge_get_vport(handle);
5177         struct hclge_dev *hdev = vport->back;
5178         bool en_bc_pmc = true;
5179
5180         /* For devices whose version is below V2, if broadcast promisc is
5181          * enabled, the vlan filter is always bypassed. So broadcast promisc
5182          * should be disabled until the user enables promisc mode.
5183          */
5184         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5185                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5186
5187         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5188                                             en_bc_pmc);
5189 }
5190
5191 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5192 {
5193         struct hclge_vport *vport = hclge_get_vport(handle);
5194
5195         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5196 }
5197
5198 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5199 {
5200         if (hlist_empty(&hdev->fd_rule_list))
5201                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5202 }
5203
5204 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5205 {
5206         if (!test_bit(location, hdev->fd_bmap)) {
5207                 set_bit(location, hdev->fd_bmap);
5208                 hdev->hclge_fd_rule_num++;
5209         }
5210 }
5211
5212 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5213 {
5214         if (test_bit(location, hdev->fd_bmap)) {
5215                 clear_bit(location, hdev->fd_bmap);
5216                 hdev->hclge_fd_rule_num--;
5217         }
5218 }
5219
5220 static void hclge_fd_free_node(struct hclge_dev *hdev,
5221                                struct hclge_fd_rule *rule)
5222 {
5223         hlist_del(&rule->rule_node);
5224         kfree(rule);
5225         hclge_sync_fd_state(hdev);
5226 }
5227
5228 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5229                                       struct hclge_fd_rule *old_rule,
5230                                       struct hclge_fd_rule *new_rule,
5231                                       enum HCLGE_FD_NODE_STATE state)
5232 {
5233         switch (state) {
5234         case HCLGE_FD_TO_ADD:
5235         case HCLGE_FD_ACTIVE:
5236                 /* 1) if the new state is TO_ADD, just replace the old rule
5237                  * with the same location, no matter its state, because the
5238                  * new rule will be configured to the hardware.
5239                  * 2) if the new state is ACTIVE, it means the new rule
5240                  * has been configured to the hardware, so just replace
5241                  * the old rule node with the same location.
5242                  * 3) in both cases, no new node is added to the list, so it's
5243                  * unnecessary to update the rule number and fd_bmap.
5244                  */
5245                 new_rule->rule_node.next = old_rule->rule_node.next;
5246                 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5247                 memcpy(old_rule, new_rule, sizeof(*old_rule));
5248                 kfree(new_rule);
5249                 break;
5250         case HCLGE_FD_DELETED:
5251                 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5252                 hclge_fd_free_node(hdev, old_rule);
5253                 break;
5254         case HCLGE_FD_TO_DEL:
5255                 /* if the new request is TO_DEL and the old rule exists:
5256                  * 1) if the state of the old rule is TO_DEL, we need do nothing,
5257                  * because we delete the rule by location, so the other rule
5258                  * content is unnecessary.
5259                  * 2) if the state of the old rule is ACTIVE, we need to change
5260                  * its state to TO_DEL, so the rule will be deleted when the
5261                  * periodic task is scheduled.
5262                  * 3) if the state of the old rule is TO_ADD, it means the rule
5263                  * hasn't been added to hardware, so we just delete the rule node
5264                  * from fd_rule_list directly.
5265                  */
5266                 if (old_rule->state == HCLGE_FD_TO_ADD) {
5267                         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5268                         hclge_fd_free_node(hdev, old_rule);
5269                         return;
5270                 }
5271                 old_rule->state = HCLGE_FD_TO_DEL;
5272                 break;
5273         }
5274 }
5275
5276 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5277                                                 u16 location,
5278                                                 struct hclge_fd_rule **parent)
5279 {
5280         struct hclge_fd_rule *rule;
5281         struct hlist_node *node;
5282
5283         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5284                 if (rule->location == location)
5285                         return rule;
5286                 else if (rule->location > location)
5287                         return NULL;
5288                 /* record the parent node, used to keep the nodes in fd_rule_list
5289                  * in ascending order.
5290                  */
5291                 *parent = rule;
5292         }
5293
5294         return NULL;
5295 }
5296
5297 /* insert fd rule node in ascending order according to rule->location */
5298 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5299                                       struct hclge_fd_rule *rule,
5300                                       struct hclge_fd_rule *parent)
5301 {
5302         INIT_HLIST_NODE(&rule->rule_node);
5303
5304         if (parent)
5305                 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5306         else
5307                 hlist_add_head(&rule->rule_node, hlist);
5308 }
5309
5310 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5311                                      struct hclge_fd_user_def_cfg *cfg)
5312 {
5313         struct hclge_fd_user_def_cfg_cmd *req;
5314         struct hclge_desc desc;
5315         u16 data = 0;
5316         int ret;
5317
5318         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5319
5320         req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5321
5322         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5323         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5324                         HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5325         req->ol2_cfg = cpu_to_le16(data);
5326
5327         data = 0;
5328         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5329         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5330                         HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5331         req->ol3_cfg = cpu_to_le16(data);
5332
5333         data = 0;
5334         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5335         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5336                         HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5337         req->ol4_cfg = cpu_to_le16(data);
5338
5339         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5340         if (ret)
5341                 dev_err(&hdev->pdev->dev,
5342                         "failed to set fd user def data, ret= %d\n", ret);
5343         return ret;
5344 }
5345
5346 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5347 {
5348         int ret;
5349
5350         if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5351                 return;
5352
5353         if (!locked)
5354                 spin_lock_bh(&hdev->fd_rule_lock);
5355
5356         ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5357         if (ret)
5358                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5359
5360         if (!locked)
5361                 spin_unlock_bh(&hdev->fd_rule_lock);
5362 }
5363
5364 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5365                                           struct hclge_fd_rule *rule)
5366 {
5367         struct hlist_head *hlist = &hdev->fd_rule_list;
5368         struct hclge_fd_rule *fd_rule, *parent = NULL;
5369         struct hclge_fd_user_def_info *info, *old_info;
5370         struct hclge_fd_user_def_cfg *cfg;
5371
5372         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5373             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5374                 return 0;
5375
5376         /* valid layers start from 1, so subtract 1 to get the cfg index */
5377         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5378         info = &rule->ep.user_def;
5379
5380         if (!cfg->ref_cnt || cfg->offset == info->offset)
5381                 return 0;
5382
5383         if (cfg->ref_cnt > 1)
5384                 goto error;
5385
5386         fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5387         if (fd_rule) {
5388                 old_info = &fd_rule->ep.user_def;
5389                 if (info->layer == old_info->layer)
5390                         return 0;
5391         }
5392
5393 error:
5394         dev_err(&hdev->pdev->dev,
5395                 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5396                 info->layer + 1);
5397         return -ENOSPC;
5398 }
5399
5400 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5401                                          struct hclge_fd_rule *rule)
5402 {
5403         struct hclge_fd_user_def_cfg *cfg;
5404
5405         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5406             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5407                 return;
5408
5409         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5410         if (!cfg->ref_cnt) {
5411                 cfg->offset = rule->ep.user_def.offset;
5412                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5413         }
5414         cfg->ref_cnt++;
5415 }
5416
5417 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5418                                          struct hclge_fd_rule *rule)
5419 {
5420         struct hclge_fd_user_def_cfg *cfg;
5421
5422         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5423             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5424                 return;
5425
5426         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5427         if (!cfg->ref_cnt)
5428                 return;
5429
5430         cfg->ref_cnt--;
5431         if (!cfg->ref_cnt) {
5432                 cfg->offset = 0;
5433                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5434         }
5435 }
5436
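/* Central update point for the fd rule list. If a rule already exists at
 * 'location', rebalance the user-def reference counts and replace or mark
 * the node according to 'state'; otherwise insert a new node in ascending
 * location order and, for HCLGE_FD_TO_ADD, schedule the periodic task to
 * push the rule to hardware.
 */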
5437 static void hclge_update_fd_list(struct hclge_dev *hdev,
5438                                  enum HCLGE_FD_NODE_STATE state, u16 location,
5439                                  struct hclge_fd_rule *new_rule)
5440 {
5441         struct hlist_head *hlist = &hdev->fd_rule_list;
5442         struct hclge_fd_rule *fd_rule, *parent = NULL;
5443
5444         fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5445         if (fd_rule) {
5446                 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5447                 if (state == HCLGE_FD_ACTIVE)
5448                         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5449                 hclge_sync_fd_user_def_cfg(hdev, true);
5450
5451                 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5452                 return;
5453         }
5454
5455         /* it's unlikely to fail here, because we have checked whether the
5456          * rule exists before.
5457          */
5458         if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5459                 dev_warn(&hdev->pdev->dev,
5460                          "failed to delete fd rule %u, it's inexistent\n",
5461                          location);
5462                 return;
5463         }
5464
5465         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5466         hclge_sync_fd_user_def_cfg(hdev, true);
5467
5468         hclge_fd_insert_rule_node(hlist, new_rule, parent);
5469         hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5470
5471         if (state == HCLGE_FD_TO_ADD) {
5472                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5473                 hclge_task_schedule(hdev, 0);
5474         }
5475 }
5476
5477 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5478 {
5479         struct hclge_get_fd_mode_cmd *req;
5480         struct hclge_desc desc;
5481         int ret;
5482
5483         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5484
5485         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5486
5487         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5488         if (ret) {
5489                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5490                 return ret;
5491         }
5492
5493         *fd_mode = req->mode;
5494
5495         return ret;
5496 }
5497
5498 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5499                                    u32 *stage1_entry_num,
5500                                    u32 *stage2_entry_num,
5501                                    u16 *stage1_counter_num,
5502                                    u16 *stage2_counter_num)
5503 {
5504         struct hclge_get_fd_allocation_cmd *req;
5505         struct hclge_desc desc;
5506         int ret;
5507
5508         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5509
5510         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5511
5512         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5513         if (ret) {
5514                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5515                         ret);
5516                 return ret;
5517         }
5518
5519         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5520         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5521         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5522         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5523
5524         return ret;
5525 }
5526
5527 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5528                                    enum HCLGE_FD_STAGE stage_num)
5529 {
5530         struct hclge_set_fd_key_config_cmd *req;
5531         struct hclge_fd_key_cfg *stage;
5532         struct hclge_desc desc;
5533         int ret;
5534
5535         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5536
5537         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5538         stage = &hdev->fd_cfg.key_cfg[stage_num];
5539         req->stage = stage_num;
5540         req->key_select = stage->key_sel;
5541         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5542         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5543         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5544         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5545         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5546         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5547
5548         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5549         if (ret)
5550                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5551
5552         return ret;
5553 }
5554
5555 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5556 {
5557         struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5558
5559         spin_lock_bh(&hdev->fd_rule_lock);
5560         memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5561         spin_unlock_bh(&hdev->fd_rule_lock);
5562
5563         hclge_fd_set_user_def_cmd(hdev, cfg);
5564 }
5565
5566 static int hclge_init_fd_config(struct hclge_dev *hdev)
5567 {
5568 #define LOW_2_WORDS             0x03
5569         struct hclge_fd_key_cfg *key_cfg;
5570         int ret;
5571
5572         if (!hnae3_dev_fd_supported(hdev))
5573                 return 0;
5574
5575         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5576         if (ret)
5577                 return ret;
5578
5579         switch (hdev->fd_cfg.fd_mode) {
5580         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5581                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5582                 break;
5583         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5584                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5585                 break;
5586         default:
5587                 dev_err(&hdev->pdev->dev,
5588                         "Unsupported flow director mode %u\n",
5589                         hdev->fd_cfg.fd_mode);
5590                 return -EOPNOTSUPP;
5591         }
5592
5593         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5594         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5595         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5596         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5597         key_cfg->outer_sipv6_word_en = 0;
5598         key_cfg->outer_dipv6_word_en = 0;
5599
5600         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5601                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5602                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5603                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5604
5605         /* If using the max 400 bit key, we can support tuples for ether type */
5606         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5607                 key_cfg->tuple_active |=
5608                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5609                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5610                         key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5611         }
5612
5613         /* roce_type is used to filter roce frames,
5614          * dst_vport is used to specify the vport that the rule applies to.
5615          */
5616         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5617
5618         ret = hclge_get_fd_allocation(hdev,
5619                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5620                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5621                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5622                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5623         if (ret)
5624                 return ret;
5625
5626         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5627 }
5628
5629 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5630                                 int loc, u8 *key, bool is_add)
5631 {
5632         struct hclge_fd_tcam_config_1_cmd *req1;
5633         struct hclge_fd_tcam_config_2_cmd *req2;
5634         struct hclge_fd_tcam_config_3_cmd *req3;
5635         struct hclge_desc desc[3];
5636         int ret;
5637
5638         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5639         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5640         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5641         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5642         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5643
5644         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5645         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5646         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5647
5648         req1->stage = stage;
5649         req1->xy_sel = sel_x ? 1 : 0;
5650         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5651         req1->index = cpu_to_le32(loc);
5652         req1->entry_vld = sel_x ? is_add : 0;
5653
5654         if (key) {
5655                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5656                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5657                        sizeof(req2->tcam_data));
5658                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5659                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5660         }
5661
5662         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5663         if (ret)
5664                 dev_err(&hdev->pdev->dev,
5665                         "config tcam key fail, ret=%d\n",
5666                         ret);
5667
5668         return ret;
5669 }
5670
5671 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5672                               struct hclge_fd_ad_data *action)
5673 {
5674         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5675         struct hclge_fd_ad_config_cmd *req;
5676         struct hclge_desc desc;
5677         u64 ad_data = 0;
5678         int ret;
5679
5680         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5681
5682         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5683         req->index = cpu_to_le32(loc);
5684         req->stage = stage;
5685
5686         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5687                       action->write_rule_id_to_bd);
5688         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5689                         action->rule_id);
5690         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5691                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5692                               action->override_tc);
5693                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5694                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5695         }
5696         ad_data <<= 32;
5697         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5698         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5699                       action->forward_to_direct_queue);
5700         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5701                         action->queue_id);
5702         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5703         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5704                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5705         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5706         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5707                         action->counter_id);
5708
5709         req->ad_data = cpu_to_le64(ad_data);
5710         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5711         if (ret)
5712                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5713
5714         return ret;
5715 }
5716
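/* Convert one tuple of the rule into its TCAM key representation.
 * For each supported key option the (value, mask) pair taken from the
 * rule is translated by the calc_x()/calc_y() helpers into the x/y
 * encoding expected by the TCAM and written to key_x/key_y.
 * Returns true when the tuple has been handled (converted, or left as
 * zeros because it is marked in unused_tuple), false for an unknown
 * key option.
 */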
5717 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5718                                    struct hclge_fd_rule *rule)
5719 {
5720         int offset, moffset, ip_offset;
5721         enum HCLGE_FD_KEY_OPT key_opt;
5722         u16 tmp_x_s, tmp_y_s;
5723         u32 tmp_x_l, tmp_y_l;
5724         u8 *p = (u8 *)rule;
5725         int i;
5726
5727         if (rule->unused_tuple & BIT(tuple_bit))
5728                 return true;
5729
5730         key_opt = tuple_key_info[tuple_bit].key_opt;
5731         offset = tuple_key_info[tuple_bit].offset;
5732         moffset = tuple_key_info[tuple_bit].moffset;
5733
5734         switch (key_opt) {
5735         case KEY_OPT_U8:
5736                 calc_x(*key_x, p[offset], p[moffset]);
5737                 calc_y(*key_y, p[offset], p[moffset]);
5738
5739                 return true;
5740         case KEY_OPT_LE16:
5741                 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5742                 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5743                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5744                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5745
5746                 return true;
5747         case KEY_OPT_LE32:
5748                 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5749                 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5750                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5751                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5752
5753                 return true;
5754         case KEY_OPT_MAC:
5755                 for (i = 0; i < ETH_ALEN; i++) {
5756                         calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5757                                p[moffset + i]);
5758                         calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5759                                p[moffset + i]);
5760                 }
5761
5762                 return true;
5763         case KEY_OPT_IP:
5764                 ip_offset = IPV4_INDEX * sizeof(u32);
5765                 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5766                        *(u32 *)(&p[moffset + ip_offset]));
5767                 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5768                        *(u32 *)(&p[moffset + ip_offset]));
5769                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5770                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5771
5772                 return true;
5773         default:
5774                 return false;
5775         }
5776 }
5777
5778 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5779                                  u8 vf_id, u8 network_port_id)
5780 {
5781         u32 port_number = 0;
5782
5783         if (port_type == HOST_PORT) {
5784                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5785                                 pf_id);
5786                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5787                                 vf_id);
5788                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5789         } else {
5790                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5791                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5792                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5793         }
5794
5795         return port_number;
5796 }
5797
5798 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5799                                        __le32 *key_x, __le32 *key_y,
5800                                        struct hclge_fd_rule *rule)
5801 {
5802         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5803         u8 cur_pos = 0, tuple_size, shift_bits;
5804         unsigned int i;
5805
5806         for (i = 0; i < MAX_META_DATA; i++) {
5807                 tuple_size = meta_data_key_info[i].key_length;
5808                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5809
5810                 switch (tuple_bit) {
5811                 case BIT(ROCE_TYPE):
5812                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5813                         cur_pos += tuple_size;
5814                         break;
5815                 case BIT(DST_VPORT):
5816                         port_number = hclge_get_port_number(HOST_PORT, 0,
5817                                                             rule->vf_id, 0);
5818                         hnae3_set_field(meta_data,
5819                                         GENMASK(cur_pos + tuple_size, cur_pos),
5820                                         cur_pos, port_number);
5821                         cur_pos += tuple_size;
5822                         break;
5823                 default:
5824                         break;
5825                 }
5826         }
5827
5828         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5829         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5830         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5831
5832         *key_x = cpu_to_le32(tmp_x << shift_bits);
5833         *key_y = cpu_to_le32(tmp_y << shift_bits);
5834 }
5835
5836 /* A complete key is a combination of the meta data key and the tuple key.
5837  * The meta data key is stored in the MSB region, the tuple key is stored in
5838  * the LSB region, and unused bits are filled with 0.
5839  */
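/* Illustrative sketch of the key buffer built below, assuming the
 * 400 bit (50 byte) key and a 32 bit meta data region (assumed sizes,
 * for illustration only; the real sizes depend on fd_mode):
 *
 *   byte 0                                      byte 46        byte 49
 *   +--------------------------------------------+---------------+
 *   | tuple key (LSB region, zero padded)        | meta data key |
 *   +--------------------------------------------+---------------+
 */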
5840 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5841                             struct hclge_fd_rule *rule)
5842 {
5843         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5844         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5845         u8 *cur_key_x, *cur_key_y;
5846         u8 meta_data_region;
5847         u8 tuple_size;
5848         int ret;
5849         u32 i;
5850
5851         memset(key_x, 0, sizeof(key_x));
5852         memset(key_y, 0, sizeof(key_y));
5853         cur_key_x = key_x;
5854         cur_key_y = key_y;
5855
5856         for (i = 0; i < MAX_TUPLE; i++) {
5857                 bool tuple_valid;
5858
5859                 tuple_size = tuple_key_info[i].key_length / 8;
5860                 if (!(key_cfg->tuple_active & BIT(i)))
5861                         continue;
5862
5863                 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5864                                                      cur_key_y, rule);
5865                 if (tuple_valid) {
5866                         cur_key_x += tuple_size;
5867                         cur_key_y += tuple_size;
5868                 }
5869         }
5870
5871         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5872                         MAX_META_DATA_LENGTH / 8;
5873
5874         hclge_fd_convert_meta_data(key_cfg,
5875                                    (__le32 *)(key_x + meta_data_region),
5876                                    (__le32 *)(key_y + meta_data_region),
5877                                    rule);
5878
5879         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5880                                    true);
5881         if (ret) {
5882                 dev_err(&hdev->pdev->dev,
5883                         "fd key_y config fail, loc=%u, ret=%d\n",
5884                         rule->location, ret);
5885                 return ret;
5886         }
5887
5888         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5889                                    true);
5890         if (ret)
5891                 dev_err(&hdev->pdev->dev,
5892                         "fd key_x config fail, loc=%u, ret=%d\n",
5893                         rule->location, ret);
5894         return ret;
5895 }
5896
5897 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5898                                struct hclge_fd_rule *rule)
5899 {
5900         struct hclge_vport *vport = hdev->vport;
5901         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5902         struct hclge_fd_ad_data ad_data;
5903
5904         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5905         ad_data.ad_id = rule->location;
5906
5907         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5908                 ad_data.drop_packet = true;
5909         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5910                 ad_data.override_tc = true;
5911                 ad_data.queue_id =
5912                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5913                 ad_data.tc_size =
5914                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5915         } else {
5916                 ad_data.forward_to_direct_queue = true;
5917                 ad_data.queue_id = rule->queue_id;
5918         }
5919
5920         ad_data.use_counter = false;
5921         ad_data.counter_id = 0;
5922
5923         ad_data.use_next_stage = false;
5924         ad_data.next_input_key = 0;
5925
5926         ad_data.write_rule_id_to_bd = true;
5927         ad_data.rule_id = rule->location;
5928
5929         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5930 }
5931
5932 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5933                                        u32 *unused_tuple)
5934 {
5935         if (!spec || !unused_tuple)
5936                 return -EINVAL;
5937
5938         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5939
5940         if (!spec->ip4src)
5941                 *unused_tuple |= BIT(INNER_SRC_IP);
5942
5943         if (!spec->ip4dst)
5944                 *unused_tuple |= BIT(INNER_DST_IP);
5945
5946         if (!spec->psrc)
5947                 *unused_tuple |= BIT(INNER_SRC_PORT);
5948
5949         if (!spec->pdst)
5950                 *unused_tuple |= BIT(INNER_DST_PORT);
5951
5952         if (!spec->tos)
5953                 *unused_tuple |= BIT(INNER_IP_TOS);
5954
5955         return 0;
5956 }
5957
5958 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5959                                     u32 *unused_tuple)
5960 {
5961         if (!spec || !unused_tuple)
5962                 return -EINVAL;
5963
5964         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5965                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5966
5967         if (!spec->ip4src)
5968                 *unused_tuple |= BIT(INNER_SRC_IP);
5969
5970         if (!spec->ip4dst)
5971                 *unused_tuple |= BIT(INNER_DST_IP);
5972
5973         if (!spec->tos)
5974                 *unused_tuple |= BIT(INNER_IP_TOS);
5975
5976         if (!spec->proto)
5977                 *unused_tuple |= BIT(INNER_IP_PROTO);
5978
5979         if (spec->l4_4_bytes)
5980                 return -EOPNOTSUPP;
5981
5982         if (spec->ip_ver != ETH_RX_NFC_IP4)
5983                 return -EOPNOTSUPP;
5984
5985         return 0;
5986 }
5987
5988 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5989                                        u32 *unused_tuple)
5990 {
5991         if (!spec || !unused_tuple)
5992                 return -EINVAL;
5993
5994         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5995
5996         /* check whether the src/dst ip address is used */
5997         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5998                 *unused_tuple |= BIT(INNER_SRC_IP);
5999
6000         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6001                 *unused_tuple |= BIT(INNER_DST_IP);
6002
6003         if (!spec->psrc)
6004                 *unused_tuple |= BIT(INNER_SRC_PORT);
6005
6006         if (!spec->pdst)
6007                 *unused_tuple |= BIT(INNER_DST_PORT);
6008
6009         if (!spec->tclass)
6010                 *unused_tuple |= BIT(INNER_IP_TOS);
6011
6012         return 0;
6013 }
6014
6015 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6016                                     u32 *unused_tuple)
6017 {
6018         if (!spec || !unused_tuple)
6019                 return -EINVAL;
6020
6021         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6022                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6023
6024         /* check whether the src/dst ip address is used */
6025         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6026                 *unused_tuple |= BIT(INNER_SRC_IP);
6027
6028         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6029                 *unused_tuple |= BIT(INNER_DST_IP);
6030
6031         if (!spec->l4_proto)
6032                 *unused_tuple |= BIT(INNER_IP_PROTO);
6033
6034         if (!spec->tclass)
6035                 *unused_tuple |= BIT(INNER_IP_TOS);
6036
6037         if (spec->l4_4_bytes)
6038                 return -EOPNOTSUPP;
6039
6040         return 0;
6041 }
6042
6043 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6044 {
6045         if (!spec || !unused_tuple)
6046                 return -EINVAL;
6047
6048         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6049                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6050                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6051
6052         if (is_zero_ether_addr(spec->h_source))
6053                 *unused_tuple |= BIT(INNER_SRC_MAC);
6054
6055         if (is_zero_ether_addr(spec->h_dest))
6056                 *unused_tuple |= BIT(INNER_DST_MAC);
6057
6058         if (!spec->h_proto)
6059                 *unused_tuple |= BIT(INNER_ETH_TYPE);
6060
6061         return 0;
6062 }
6063
6064 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6065                                     struct ethtool_rx_flow_spec *fs,
6066                                     u32 *unused_tuple)
6067 {
6068         if (fs->flow_type & FLOW_EXT) {
6069                 if (fs->h_ext.vlan_etype) {
6070                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6071                         return -EOPNOTSUPP;
6072                 }
6073
6074                 if (!fs->h_ext.vlan_tci)
6075                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6076
6077                 if (fs->m_ext.vlan_tci &&
6078                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6079                         dev_err(&hdev->pdev->dev,
6080                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6081                                 be16_to_cpu(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6082                         return -EINVAL;
6083                 }
6084         } else {
6085                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6086         }
6087
6088         if (fs->flow_type & FLOW_MAC_EXT) {
6089                 if (hdev->fd_cfg.fd_mode !=
6090                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6091                         dev_err(&hdev->pdev->dev,
6092                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6093                         return -EOPNOTSUPP;
6094                 }
6095
6096                 if (is_zero_ether_addr(fs->h_ext.h_dest))
6097                         *unused_tuple |= BIT(INNER_DST_MAC);
6098                 else
6099                         *unused_tuple &= ~BIT(INNER_DST_MAC);
6100         }
6101
6102         return 0;
6103 }
6104
6105 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6106                                        struct hclge_fd_user_def_info *info)
6107 {
6108         switch (flow_type) {
6109         case ETHER_FLOW:
6110                 info->layer = HCLGE_FD_USER_DEF_L2;
6111                 *unused_tuple &= ~BIT(INNER_L2_RSV);
6112                 break;
6113         case IP_USER_FLOW:
6114         case IPV6_USER_FLOW:
6115                 info->layer = HCLGE_FD_USER_DEF_L3;
6116                 *unused_tuple &= ~BIT(INNER_L3_RSV);
6117                 break;
6118         case TCP_V4_FLOW:
6119         case UDP_V4_FLOW:
6120         case TCP_V6_FLOW:
6121         case UDP_V6_FLOW:
6122                 info->layer = HCLGE_FD_USER_DEF_L4;
6123                 *unused_tuple &= ~BIT(INNER_L4_RSV);
6124                 break;
6125         default:
6126                 return -EOPNOTSUPP;
6127         }
6128
6129         return 0;
6130 }
6131
6132 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6133 {
6134         return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6135 }
6136
6137 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6138                                          struct ethtool_rx_flow_spec *fs,
6139                                          u32 *unused_tuple,
6140                                          struct hclge_fd_user_def_info *info)
6141 {
6142         u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6143         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6144         u16 data, offset, data_mask, offset_mask;
6145         int ret;
6146
6147         info->layer = HCLGE_FD_USER_DEF_NONE;
6148         *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6149
6150         if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6151                 return 0;
6152
6153         /* user-def data from ethtool is 64 bit value, the bit0~15 is used
6154          * for data, and bit32~47 is used for offset.
6155          */
6156         data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6157         data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6158         offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6159         offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6160
6161         if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6162                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6163                 return -EOPNOTSUPP;
6164         }
6165
6166         if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6167                 dev_err(&hdev->pdev->dev,
6168                         "user-def offset[%u] should be no more than %u\n",
6169                         offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6170                 return -EINVAL;
6171         }
6172
6173         if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6174                 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6175                 return -EINVAL;
6176         }
6177
6178         ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6179         if (ret) {
6180                 dev_err(&hdev->pdev->dev,
6181                         "unsupported flow type for user-def bytes, ret = %d\n",
6182                         ret);
6183                 return ret;
6184         }
6185
6186         info->data = data;
6187         info->data_mask = data_mask;
6188         info->offset = offset;
6189
6190         return 0;
6191 }
6192
6193 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6194                                struct ethtool_rx_flow_spec *fs,
6195                                u32 *unused_tuple,
6196                                struct hclge_fd_user_def_info *info)
6197 {
6198         u32 flow_type;
6199         int ret;
6200
6201         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6202                 dev_err(&hdev->pdev->dev,
6203                         "failed to config fd rules, invalid rule location: %u, max is %u\n",
6204                         fs->location,
6205                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6206                 return -EINVAL;
6207         }
6208
6209         ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6210         if (ret)
6211                 return ret;
6212
6213         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6214         switch (flow_type) {
6215         case SCTP_V4_FLOW:
6216         case TCP_V4_FLOW:
6217         case UDP_V4_FLOW:
6218                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6219                                                   unused_tuple);
6220                 break;
6221         case IP_USER_FLOW:
6222                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6223                                                unused_tuple);
6224                 break;
6225         case SCTP_V6_FLOW:
6226         case TCP_V6_FLOW:
6227         case UDP_V6_FLOW:
6228                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6229                                                   unused_tuple);
6230                 break;
6231         case IPV6_USER_FLOW:
6232                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6233                                                unused_tuple);
6234                 break;
6235         case ETHER_FLOW:
6236                 if (hdev->fd_cfg.fd_mode !=
6237                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6238                         dev_err(&hdev->pdev->dev,
6239                                 "ETHER_FLOW is not supported in current fd mode!\n");
6240                         return -EOPNOTSUPP;
6241                 }
6242
6243                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6244                                                  unused_tuple);
6245                 break;
6246         default:
6247                 dev_err(&hdev->pdev->dev,
6248                         "unsupported protocol type, protocol type = %#x\n",
6249                         flow_type);
6250                 return -EOPNOTSUPP;
6251         }
6252
6253         if (ret) {
6254                 dev_err(&hdev->pdev->dev,
6255                         "failed to check flow union tuple, ret = %d\n",
6256                         ret);
6257                 return ret;
6258         }
6259
6260         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6261 }
6262
6263 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6264                                       struct ethtool_rx_flow_spec *fs,
6265                                       struct hclge_fd_rule *rule, u8 ip_proto)
6266 {
6267         rule->tuples.src_ip[IPV4_INDEX] =
6268                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6269         rule->tuples_mask.src_ip[IPV4_INDEX] =
6270                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6271
6272         rule->tuples.dst_ip[IPV4_INDEX] =
6273                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6274         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6275                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6276
6277         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6278         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6279
6280         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6281         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6282
6283         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6284         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6285
6286         rule->tuples.ether_proto = ETH_P_IP;
6287         rule->tuples_mask.ether_proto = 0xFFFF;
6288
6289         rule->tuples.ip_proto = ip_proto;
6290         rule->tuples_mask.ip_proto = 0xFF;
6291 }
6292
6293 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6294                                    struct ethtool_rx_flow_spec *fs,
6295                                    struct hclge_fd_rule *rule)
6296 {
6297         rule->tuples.src_ip[IPV4_INDEX] =
6298                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6299         rule->tuples_mask.src_ip[IPV4_INDEX] =
6300                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6301
6302         rule->tuples.dst_ip[IPV4_INDEX] =
6303                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6304         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6305                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6306
6307         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6308         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6309
6310         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6311         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6312
6313         rule->tuples.ether_proto = ETH_P_IP;
6314         rule->tuples_mask.ether_proto = 0xFFFF;
6315 }
6316
6317 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6318                                       struct ethtool_rx_flow_spec *fs,
6319                                       struct hclge_fd_rule *rule, u8 ip_proto)
6320 {
6321         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6322                           IPV6_SIZE);
6323         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6324                           IPV6_SIZE);
6325
6326         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6327                           IPV6_SIZE);
6328         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6329                           IPV6_SIZE);
6330
6331         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6332         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6333
6334         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6335         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6336
6337         rule->tuples.ether_proto = ETH_P_IPV6;
6338         rule->tuples_mask.ether_proto = 0xFFFF;
6339
6340         rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6341         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6342
6343         rule->tuples.ip_proto = ip_proto;
6344         rule->tuples_mask.ip_proto = 0xFF;
6345 }
6346
6347 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6348                                    struct ethtool_rx_flow_spec *fs,
6349                                    struct hclge_fd_rule *rule)
6350 {
6351         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6352                           IPV6_SIZE);
6353         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6354                           IPV6_SIZE);
6355
6356         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6357                           IPV6_SIZE);
6358         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6359                           IPV6_SIZE);
6360
6361         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6362         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6363
6364         rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6365         rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6366
6367         rule->tuples.ether_proto = ETH_P_IPV6;
6368         rule->tuples_mask.ether_proto = 0xFFFF;
6369 }
6370
6371 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6372                                      struct ethtool_rx_flow_spec *fs,
6373                                      struct hclge_fd_rule *rule)
6374 {
6375         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6376         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6377
6378         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6379         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6380
6381         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6382         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6383 }
6384
6385 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6386                                         struct hclge_fd_rule *rule)
6387 {
6388         switch (info->layer) {
6389         case HCLGE_FD_USER_DEF_L2:
6390                 rule->tuples.l2_user_def = info->data;
6391                 rule->tuples_mask.l2_user_def = info->data_mask;
6392                 break;
6393         case HCLGE_FD_USER_DEF_L3:
6394                 rule->tuples.l3_user_def = info->data;
6395                 rule->tuples_mask.l3_user_def = info->data_mask;
6396                 break;
6397         case HCLGE_FD_USER_DEF_L4:
6398                 rule->tuples.l4_user_def = (u32)info->data << 16;
6399                 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6400                 break;
6401         default:
6402                 break;
6403         }
6404
6405         rule->ep.user_def = *info;
6406 }
6407
6408 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6409                               struct ethtool_rx_flow_spec *fs,
6410                               struct hclge_fd_rule *rule,
6411                               struct hclge_fd_user_def_info *info)
6412 {
6413         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6414
6415         switch (flow_type) {
6416         case SCTP_V4_FLOW:
6417                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6418                 break;
6419         case TCP_V4_FLOW:
6420                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6421                 break;
6422         case UDP_V4_FLOW:
6423                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6424                 break;
6425         case IP_USER_FLOW:
6426                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6427                 break;
6428         case SCTP_V6_FLOW:
6429                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6430                 break;
6431         case TCP_V6_FLOW:
6432                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6433                 break;
6434         case UDP_V6_FLOW:
6435                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6436                 break;
6437         case IPV6_USER_FLOW:
6438                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6439                 break;
6440         case ETHER_FLOW:
6441                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6442                 break;
6443         default:
6444                 return -EOPNOTSUPP;
6445         }
6446
6447         if (fs->flow_type & FLOW_EXT) {
6448                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6449                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6450                 hclge_fd_get_user_def_tuple(info, rule);
6451         }
6452
6453         if (fs->flow_type & FLOW_MAC_EXT) {
6454                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6455                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6456         }
6457
6458         return 0;
6459 }
6460
6461 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6462                                 struct hclge_fd_rule *rule)
6463 {
6464         int ret;
6465
6466         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6467         if (ret)
6468                 return ret;
6469
6470         return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6471 }
6472
6473 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6474                                      struct hclge_fd_rule *rule)
6475 {
6476         int ret;
6477
6478         spin_lock_bh(&hdev->fd_rule_lock);
6479
6480         if (hdev->fd_active_type != rule->rule_type &&
6481             (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6482              hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6483                 dev_err(&hdev->pdev->dev,
6484                         "mode conflict (new type %d, active type %d), please delete the existing rules first\n",
6485                         rule->rule_type, hdev->fd_active_type);
6486                 spin_unlock_bh(&hdev->fd_rule_lock);
6487                 return -EINVAL;
6488         }
6489
6490         ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6491         if (ret)
6492                 goto out;
6493
6494         ret = hclge_clear_arfs_rules(hdev);
6495         if (ret)
6496                 goto out;
6497
6498         ret = hclge_fd_config_rule(hdev, rule);
6499         if (ret)
6500                 goto out;
6501
6502         rule->state = HCLGE_FD_ACTIVE;
6503         hdev->fd_active_type = rule->rule_type;
6504         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6505
6506 out:
6507         spin_unlock_bh(&hdev->fd_rule_lock);
6508         return ret;
6509 }
6510
6511 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6512 {
6513         struct hclge_vport *vport = hclge_get_vport(handle);
6514         struct hclge_dev *hdev = vport->back;
6515
6516         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6517 }
6518
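/* The ring_cookie from ethtool encodes the destination of the rule:
 * RX_CLS_FLOW_DISC means "drop the packet"; otherwise
 * ethtool_get_flow_spec_ring() yields the queue index and
 * ethtool_get_flow_spec_ring_vf() the VF id, where VF id 0 selects the
 * PF itself.
 */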
6519 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6520                                       u16 *vport_id, u8 *action, u16 *queue_id)
6521 {
6522         struct hclge_vport *vport = hdev->vport;
6523
6524         if (ring_cookie == RX_CLS_FLOW_DISC) {
6525                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6526         } else {
6527                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6528                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6529                 u16 tqps;
6530
6531                 if (vf > hdev->num_req_vfs) {
6532                         dev_err(&hdev->pdev->dev,
6533                                 "Error: vf id (%u) > max vf num (%u)\n",
6534                                 vf, hdev->num_req_vfs);
6535                         return -EINVAL;
6536                 }
6537
6538                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6539                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6540
6541                 if (ring >= tqps) {
6542                         dev_err(&hdev->pdev->dev,
6543                                 "Error: queue id (%u) > max tqp num (%u)\n",
6544                                 ring, tqps - 1);
6545                         return -EINVAL;
6546                 }
6547
6548                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6549                 *queue_id = ring;
6550         }
6551
6552         return 0;
6553 }
6554
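/* Entry point for adding a flow director rule from ethtool, typically
 * triggered by a command such as (illustrative example only, not taken
 * from this driver):
 *
 *   ethtool -U eth0 flow-type tcp4 dst-ip 192.168.1.2 dst-port 80 \
 *           action 3 loc 0
 *
 * The ethtool core passes the parsed rule down as an
 * ethtool_rx_flow_spec inside cmd->fs.
 */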
6555 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6556                               struct ethtool_rxnfc *cmd)
6557 {
6558         struct hclge_vport *vport = hclge_get_vport(handle);
6559         struct hclge_dev *hdev = vport->back;
6560         struct hclge_fd_user_def_info info;
6561         u16 dst_vport_id = 0, q_index = 0;
6562         struct ethtool_rx_flow_spec *fs;
6563         struct hclge_fd_rule *rule;
6564         u32 unused = 0;
6565         u8 action;
6566         int ret;
6567
6568         if (!hnae3_dev_fd_supported(hdev)) {
6569                 dev_err(&hdev->pdev->dev,
6570                         "flow director is not supported\n");
6571                 return -EOPNOTSUPP;
6572         }
6573
6574         if (!hdev->fd_en) {
6575                 dev_err(&hdev->pdev->dev,
6576                         "please enable flow director first\n");
6577                 return -EOPNOTSUPP;
6578         }
6579
6580         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6581
6582         ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6583         if (ret)
6584                 return ret;
6585
6586         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6587                                          &action, &q_index);
6588         if (ret)
6589                 return ret;
6590
6591         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6592         if (!rule)
6593                 return -ENOMEM;
6594
6595         ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6596         if (ret) {
6597                 kfree(rule);
6598                 return ret;
6599         }
6600
6601         rule->flow_type = fs->flow_type;
6602         rule->location = fs->location;
6603         rule->unused_tuple = unused;
6604         rule->vf_id = dst_vport_id;
6605         rule->queue_id = q_index;
6606         rule->action = action;
6607         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6608
6609         ret = hclge_add_fd_entry_common(hdev, rule);
6610         if (ret)
6611                 kfree(rule);
6612
6613         return ret;
6614 }
6615
6616 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6617                               struct ethtool_rxnfc *cmd)
6618 {
6619         struct hclge_vport *vport = hclge_get_vport(handle);
6620         struct hclge_dev *hdev = vport->back;
6621         struct ethtool_rx_flow_spec *fs;
6622         int ret;
6623
6624         if (!hnae3_dev_fd_supported(hdev))
6625                 return -EOPNOTSUPP;
6626
6627         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6628
6629         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6630                 return -EINVAL;
6631
6632         spin_lock_bh(&hdev->fd_rule_lock);
6633         if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6634             !test_bit(fs->location, hdev->fd_bmap)) {
6635                 dev_err(&hdev->pdev->dev,
6636                         "Delete fail, rule %u does not exist\n", fs->location);
6637                 spin_unlock_bh(&hdev->fd_rule_lock);
6638                 return -ENOENT;
6639         }
6640
6641         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6642                                    NULL, false);
6643         if (ret)
6644                 goto out;
6645
6646         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6647
6648 out:
6649         spin_unlock_bh(&hdev->fd_rule_lock);
6650         return ret;
6651 }
6652
6653 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6654                                          bool clear_list)
6655 {
6656         struct hclge_fd_rule *rule;
6657         struct hlist_node *node;
6658         u16 location;
6659
6660         if (!hnae3_dev_fd_supported(hdev))
6661                 return;
6662
6663         spin_lock_bh(&hdev->fd_rule_lock);
6664
6665         for_each_set_bit(location, hdev->fd_bmap,
6666                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6667                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6668                                      NULL, false);
6669
6670         if (clear_list) {
6671                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6672                                           rule_node) {
6673                         hlist_del(&rule->rule_node);
6674                         kfree(rule);
6675                 }
6676                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6677                 hdev->hclge_fd_rule_num = 0;
6678                 bitmap_zero(hdev->fd_bmap,
6679                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6680         }
6681
6682         spin_unlock_bh(&hdev->fd_rule_lock);
6683 }
6684
6685 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6686 {
6687         hclge_clear_fd_rules_in_list(hdev, true);
6688         hclge_fd_disable_user_def(hdev);
6689 }
6690
6691 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6692 {
6693         struct hclge_vport *vport = hclge_get_vport(handle);
6694         struct hclge_dev *hdev = vport->back;
6695         struct hclge_fd_rule *rule;
6696         struct hlist_node *node;
6697
6698         /* Return ok here, because reset error handling will check this
6699          * return value. If error is returned here, the reset process will
6700          * fail.
6701          */
6702         if (!hnae3_dev_fd_supported(hdev))
6703                 return 0;
6704
6705         /* if fd is disabled, the rules should not be restored during reset */
6706         if (!hdev->fd_en)
6707                 return 0;
6708
6709         spin_lock_bh(&hdev->fd_rule_lock);
6710         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6711                 if (rule->state == HCLGE_FD_ACTIVE)
6712                         rule->state = HCLGE_FD_TO_ADD;
6713         }
6714         spin_unlock_bh(&hdev->fd_rule_lock);
6715         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6716
6717         return 0;
6718 }
6719
6720 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6721                                  struct ethtool_rxnfc *cmd)
6722 {
6723         struct hclge_vport *vport = hclge_get_vport(handle);
6724         struct hclge_dev *hdev = vport->back;
6725
6726         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6727                 return -EOPNOTSUPP;
6728
6729         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6730         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6731
6732         return 0;
6733 }
6734
6735 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6736                                      struct ethtool_tcpip4_spec *spec,
6737                                      struct ethtool_tcpip4_spec *spec_mask)
6738 {
6739         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6740         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6741                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6742
6743         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6744         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6745                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6746
6747         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6748         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6749                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6750
6751         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6752         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6753                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6754
6755         spec->tos = rule->tuples.ip_tos;
6756         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6757                         0 : rule->tuples_mask.ip_tos;
6758 }
6759
6760 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6761                                   struct ethtool_usrip4_spec *spec,
6762                                   struct ethtool_usrip4_spec *spec_mask)
6763 {
6764         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6765         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6766                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6767
6768         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6769         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6770                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6771
6772         spec->tos = rule->tuples.ip_tos;
6773         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6774                         0 : rule->tuples_mask.ip_tos;
6775
6776         spec->proto = rule->tuples.ip_proto;
6777         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6778                         0 : rule->tuples_mask.ip_proto;
6779
6780         spec->ip_ver = ETH_RX_NFC_IP4;
6781 }
6782
6783 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6784                                      struct ethtool_tcpip6_spec *spec,
6785                                      struct ethtool_tcpip6_spec *spec_mask)
6786 {
6787         cpu_to_be32_array(spec->ip6src,
6788                           rule->tuples.src_ip, IPV6_SIZE);
6789         cpu_to_be32_array(spec->ip6dst,
6790                           rule->tuples.dst_ip, IPV6_SIZE);
6791         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6792                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6793         else
6794                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6795                                   IPV6_SIZE);
6796
6797         if (rule->unused_tuple & BIT(INNER_DST_IP))
6798                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6799         else
6800                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6801                                   IPV6_SIZE);
6802
6803         spec->tclass = rule->tuples.ip_tos;
6804         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6805                         0 : rule->tuples_mask.ip_tos;
6806
6807         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6808         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6809                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6810
6811         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6812         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6813                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6814 }
6815
6816 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6817                                   struct ethtool_usrip6_spec *spec,
6818                                   struct ethtool_usrip6_spec *spec_mask)
6819 {
6820         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6821         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6822         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6823                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6824         else
6825                 cpu_to_be32_array(spec_mask->ip6src,
6826                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6827
6828         if (rule->unused_tuple & BIT(INNER_DST_IP))
6829                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6830         else
6831                 cpu_to_be32_array(spec_mask->ip6dst,
6832                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6833
6834         spec->tclass = rule->tuples.ip_tos;
6835         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6836                         0 : rule->tuples_mask.ip_tos;
6837
6838         spec->l4_proto = rule->tuples.ip_proto;
6839         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6840                         0 : rule->tuples_mask.ip_proto;
6841 }
6842
6843 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6844                                     struct ethhdr *spec,
6845                                     struct ethhdr *spec_mask)
6846 {
6847         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6848         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6849
6850         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6851                 eth_zero_addr(spec_mask->h_source);
6852         else
6853                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6854
6855         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6856                 eth_zero_addr(spec_mask->h_dest);
6857         else
6858                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6859
6860         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6861         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6862                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6863 }
6864
6865 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6866                                        struct hclge_fd_rule *rule)
6867 {
6868         if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6869             HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6870                 fs->h_ext.data[0] = 0;
6871                 fs->h_ext.data[1] = 0;
6872                 fs->m_ext.data[0] = 0;
6873                 fs->m_ext.data[1] = 0;
6874         } else {
6875                 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6876                 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6877                 fs->m_ext.data[0] =
6878                                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6879                 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6880         }
6881 }
6882
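/* Fill the FLOW_EXT and FLOW_MAC_EXT parts of an ethtool flow spec from a
 * stored rule: FLOW_EXT carries the first VLAN tag and the two user-defined
 * data words, FLOW_MAC_EXT carries the destination MAC. Tuples that the rule
 * does not use are reported with an all-zero mask.
 */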
6883 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6884                                   struct hclge_fd_rule *rule)
6885 {
6886         if (fs->flow_type & FLOW_EXT) {
6887                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6888                 fs->m_ext.vlan_tci =
6889                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6890                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6891
6892                 hclge_fd_get_user_def_info(fs, rule);
6893         }
6894
6895         if (fs->flow_type & FLOW_MAC_EXT) {
6896                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6897                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6898                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6899                 else
6900                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6901                                         rule->tuples_mask.dst_mac);
6902         }
6903 }
6904
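/* Handler behind the ethtool "get rule" (ETHTOOL_GRXCLSRULE) path: look up
 * the rule at fs->location in the location-sorted rule list and translate it
 * back into an ethtool flow spec. The ring_cookie encodes the action:
 * RX_CLS_FLOW_DISC for a drop rule, otherwise the target queue with the VF id
 * shifted up, e.g.
 *   ring_cookie = ((u64)vf_id << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | queue_id
 */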
6905 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6906                                   struct ethtool_rxnfc *cmd)
6907 {
6908         struct hclge_vport *vport = hclge_get_vport(handle);
6909         struct hclge_fd_rule *rule = NULL;
6910         struct hclge_dev *hdev = vport->back;
6911         struct ethtool_rx_flow_spec *fs;
6912         struct hlist_node *node2;
6913
6914         if (!hnae3_dev_fd_supported(hdev))
6915                 return -EOPNOTSUPP;
6916
6917         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6918
6919         spin_lock_bh(&hdev->fd_rule_lock);
6920
6921         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6922                 if (rule->location >= fs->location)
6923                         break;
6924         }
6925
6926         if (!rule || fs->location != rule->location) {
6927                 spin_unlock_bh(&hdev->fd_rule_lock);
6928
6929                 return -ENOENT;
6930         }
6931
6932         fs->flow_type = rule->flow_type;
6933         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6934         case SCTP_V4_FLOW:
6935         case TCP_V4_FLOW:
6936         case UDP_V4_FLOW:
6937                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6938                                          &fs->m_u.tcp_ip4_spec);
6939                 break;
6940         case IP_USER_FLOW:
6941                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6942                                       &fs->m_u.usr_ip4_spec);
6943                 break;
6944         case SCTP_V6_FLOW:
6945         case TCP_V6_FLOW:
6946         case UDP_V6_FLOW:
6947                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6948                                          &fs->m_u.tcp_ip6_spec);
6949                 break;
6950         case IPV6_USER_FLOW:
6951                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6952                                       &fs->m_u.usr_ip6_spec);
6953                 break;
6954         /* The flow type of the fd rule has been checked before adding it to the
6955          * rule list. As all other flow types have been handled above, it must be
6956          * ETHER_FLOW for the default case.
6957          */
6958         default:
6959                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6960                                         &fs->m_u.ether_spec);
6961                 break;
6962         }
6963
6964         hclge_fd_get_ext_info(fs, rule);
6965
6966         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6967                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6968         } else {
6969                 u64 vf_id;
6970
6971                 fs->ring_cookie = rule->queue_id;
6972                 vf_id = rule->vf_id;
6973                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6974                 fs->ring_cookie |= vf_id;
6975         }
6976
6977         spin_unlock_bh(&hdev->fd_rule_lock);
6978
6979         return 0;
6980 }
6981
6982 static int hclge_get_all_rules(struct hnae3_handle *handle,
6983                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6984 {
6985         struct hclge_vport *vport = hclge_get_vport(handle);
6986         struct hclge_dev *hdev = vport->back;
6987         struct hclge_fd_rule *rule;
6988         struct hlist_node *node2;
6989         int cnt = 0;
6990
6991         if (!hnae3_dev_fd_supported(hdev))
6992                 return -EOPNOTSUPP;
6993
6994         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6995
6996         spin_lock_bh(&hdev->fd_rule_lock);
6997         hlist_for_each_entry_safe(rule, node2,
6998                                   &hdev->fd_rule_list, rule_node) {
6999                 if (cnt == cmd->rule_cnt) {
7000                         spin_unlock_bh(&hdev->fd_rule_lock);
7001                         return -EMSGSIZE;
7002                 }
7003
7004                 if (rule->state == HCLGE_FD_TO_DEL)
7005                         continue;
7006
7007                 rule_locs[cnt] = rule->location;
7008                 cnt++;
7009         }
7010
7011         spin_unlock_bh(&hdev->fd_rule_lock);
7012
7013         cmd->rule_cnt = cnt;
7014
7015         return 0;
7016 }
7017
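/* Convert the flow_keys dissected from an aRFS skb into the flow director
 * tuple layout. IPv4 addresses are stored in the last u32 of the
 * src_ip/dst_ip arrays (index IPV6_SIZE - 1) so IPv4 and IPv6 rules share the
 * same storage.
 */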
7018 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7019                                      struct hclge_fd_rule_tuples *tuples)
7020 {
7021 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7022 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7023
7024         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7025         tuples->ip_proto = fkeys->basic.ip_proto;
7026         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7027
7028         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7029                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7030                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7031         } else {
7032                 int i;
7033
7034                 for (i = 0; i < IPV6_SIZE; i++) {
7035                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7036                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7037                 }
7038         }
7039 }
7040
7041 /* traverse all rules, check whether an existing rule has the same tuples */
7042 static struct hclge_fd_rule *
7043 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7044                           const struct hclge_fd_rule_tuples *tuples)
7045 {
7046         struct hclge_fd_rule *rule = NULL;
7047         struct hlist_node *node;
7048
7049         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7050                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7051                         return rule;
7052         }
7053
7054         return NULL;
7055 }
7056
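/* Build an aRFS rule from the extracted tuples. Only the ethertype, IP
 * protocol, IP addresses and destination port are matched; source/destination
 * MAC, VLAN tag, IP TOS and source port are marked unused. The tuple masks
 * are set to all ones, i.e. an exact match on the fields that are used.
 */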
7057 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7058                                      struct hclge_fd_rule *rule)
7059 {
7060         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7061                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7062                              BIT(INNER_SRC_PORT);
7063         rule->action = 0;
7064         rule->vf_id = 0;
7065         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7066         rule->state = HCLGE_FD_TO_ADD;
7067         if (tuples->ether_proto == ETH_P_IP) {
7068                 if (tuples->ip_proto == IPPROTO_TCP)
7069                         rule->flow_type = TCP_V4_FLOW;
7070                 else
7071                         rule->flow_type = UDP_V4_FLOW;
7072         } else {
7073                 if (tuples->ip_proto == IPPROTO_TCP)
7074                         rule->flow_type = TCP_V6_FLOW;
7075                 else
7076                         rule->flow_type = UDP_V6_FLOW;
7077         }
7078         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7079         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7080 }
7081
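/* aRFS entry point, normally reached via the hns3 client's ndo_rx_flow_steer
 * callback. The rule is allocated with GFP_ATOMIC since this may run in
 * atomic context, and the returned rule location doubles as the flow id
 * reported back to the stack.
 */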
7082 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7083                                       u16 flow_id, struct flow_keys *fkeys)
7084 {
7085         struct hclge_vport *vport = hclge_get_vport(handle);
7086         struct hclge_fd_rule_tuples new_tuples = {};
7087         struct hclge_dev *hdev = vport->back;
7088         struct hclge_fd_rule *rule;
7089         u16 bit_id;
7090
7091         if (!hnae3_dev_fd_supported(hdev))
7092                 return -EOPNOTSUPP;
7093
7094         /* when an fd rule has already been added by the user,
7095          * arfs should not work
7096          */
7097         spin_lock_bh(&hdev->fd_rule_lock);
7098         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7099             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7100                 spin_unlock_bh(&hdev->fd_rule_lock);
7101                 return -EOPNOTSUPP;
7102         }
7103
7104         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7105
7106         /* check whether a flow director filter already exists for this flow;
7107          * if not, create a new filter for it;
7108          * if a filter exists with a different queue id, modify the filter;
7109          * if a filter exists with the same queue id, do nothing
7110          */
7111         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7112         if (!rule) {
7113                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7114                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7115                         spin_unlock_bh(&hdev->fd_rule_lock);
7116                         return -ENOSPC;
7117                 }
7118
7119                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7120                 if (!rule) {
7121                         spin_unlock_bh(&hdev->fd_rule_lock);
7122                         return -ENOMEM;
7123                 }
7124
7125                 rule->location = bit_id;
7126                 rule->arfs.flow_id = flow_id;
7127                 rule->queue_id = queue_id;
7128                 hclge_fd_build_arfs_rule(&new_tuples, rule);
7129                 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7130                 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7131         } else if (rule->queue_id != queue_id) {
7132                 rule->queue_id = queue_id;
7133                 rule->state = HCLGE_FD_TO_ADD;
7134                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7135                 hclge_task_schedule(hdev, 0);
7136         }
7137         spin_unlock_bh(&hdev->fd_rule_lock);
7138         return rule->location;
7139 }
7140
7141 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7142 {
7143 #ifdef CONFIG_RFS_ACCEL
7144         struct hnae3_handle *handle = &hdev->vport[0].nic;
7145         struct hclge_fd_rule *rule;
7146         struct hlist_node *node;
7147
7148         spin_lock_bh(&hdev->fd_rule_lock);
7149         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7150                 spin_unlock_bh(&hdev->fd_rule_lock);
7151                 return;
7152         }
7153         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7154                 if (rule->state != HCLGE_FD_ACTIVE)
7155                         continue;
7156                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7157                                         rule->arfs.flow_id, rule->location)) {
7158                         rule->state = HCLGE_FD_TO_DEL;
7159                         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7160                 }
7161         }
7162         spin_unlock_bh(&hdev->fd_rule_lock);
7163 #endif
7164 }
7165
7166 /* must be called with fd_rule_lock held by the caller */
7167 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7168 {
7169 #ifdef CONFIG_RFS_ACCEL
7170         struct hclge_fd_rule *rule;
7171         struct hlist_node *node;
7172         int ret;
7173
7174         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7175                 return 0;
7176
7177         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7178                 switch (rule->state) {
7179                 case HCLGE_FD_TO_DEL:
7180                 case HCLGE_FD_ACTIVE:
7181                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7182                                                    rule->location, NULL, false);
7183                         if (ret)
7184                                 return ret;
7185                         fallthrough;
7186                 case HCLGE_FD_TO_ADD:
7187                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7188                         hlist_del(&rule->rule_node);
7189                         kfree(rule);
7190                         break;
7191                 default:
7192                         break;
7193                 }
7194         }
7195         hclge_sync_fd_state(hdev);
7196
7197 #endif
7198         return 0;
7199 }
7200
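/* The hclge_get_cls_key_* helpers below each translate one tc flower match
 * key into the rule tuples/masks; when the flower rule does not match on a
 * given key, the corresponding tuples are flagged in rule->unused_tuple
 * instead.
 */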
7201 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7202                                     struct hclge_fd_rule *rule)
7203 {
7204         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7205                 struct flow_match_basic match;
7206                 u16 ethtype_key, ethtype_mask;
7207
7208                 flow_rule_match_basic(flow, &match);
7209                 ethtype_key = ntohs(match.key->n_proto);
7210                 ethtype_mask = ntohs(match.mask->n_proto);
7211
7212                 if (ethtype_key == ETH_P_ALL) {
7213                         ethtype_key = 0;
7214                         ethtype_mask = 0;
7215                 }
7216                 rule->tuples.ether_proto = ethtype_key;
7217                 rule->tuples_mask.ether_proto = ethtype_mask;
7218                 rule->tuples.ip_proto = match.key->ip_proto;
7219                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7220         } else {
7221                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7222                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7223         }
7224 }
7225
7226 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7227                                   struct hclge_fd_rule *rule)
7228 {
7229         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7230                 struct flow_match_eth_addrs match;
7231
7232                 flow_rule_match_eth_addrs(flow, &match);
7233                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7234                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7235                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7236                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7237         } else {
7238                 rule->unused_tuple |= BIT(INNER_DST_MAC);
7239                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7240         }
7241 }
7242
7243 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7244                                    struct hclge_fd_rule *rule)
7245 {
7246         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7247                 struct flow_match_vlan match;
7248
7249                 flow_rule_match_vlan(flow, &match);
7250                 rule->tuples.vlan_tag1 = match.key->vlan_id |
7251                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7252                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7253                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7254         } else {
7255                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7256         }
7257 }
7258
7259 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7260                                  struct hclge_fd_rule *rule)
7261 {
7262         u16 addr_type = 0;
7263
7264         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7265                 struct flow_match_control match;
7266
7267                 flow_rule_match_control(flow, &match);
7268                 addr_type = match.key->addr_type;
7269         }
7270
7271         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7272                 struct flow_match_ipv4_addrs match;
7273
7274                 flow_rule_match_ipv4_addrs(flow, &match);
7275                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7276                 rule->tuples_mask.src_ip[IPV4_INDEX] =
7277                                                 be32_to_cpu(match.mask->src);
7278                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7279                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7280                                                 be32_to_cpu(match.mask->dst);
7281         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7282                 struct flow_match_ipv6_addrs match;
7283
7284                 flow_rule_match_ipv6_addrs(flow, &match);
7285                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7286                                   IPV6_SIZE);
7287                 be32_to_cpu_array(rule->tuples_mask.src_ip,
7288                                   match.mask->src.s6_addr32, IPV6_SIZE);
7289                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7290                                   IPV6_SIZE);
7291                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7292                                   match.mask->dst.s6_addr32, IPV6_SIZE);
7293         } else {
7294                 rule->unused_tuple |= BIT(INNER_SRC_IP);
7295                 rule->unused_tuple |= BIT(INNER_DST_IP);
7296         }
7297 }
7298
7299 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7300                                    struct hclge_fd_rule *rule)
7301 {
7302         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7303                 struct flow_match_ports match;
7304
7305                 flow_rule_match_ports(flow, &match);
7306
7307                 rule->tuples.src_port = be16_to_cpu(match.key->src);
7308                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7309                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7310                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7311         } else {
7312                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7313                 rule->unused_tuple |= BIT(INNER_DST_PORT);
7314         }
7315 }
7316
7317 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7318                                   struct flow_cls_offload *cls_flower,
7319                                   struct hclge_fd_rule *rule)
7320 {
7321         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7322         struct flow_dissector *dissector = flow->match.dissector;
7323
7324         if (dissector->used_keys &
7325             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7326               BIT(FLOW_DISSECTOR_KEY_BASIC) |
7327               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7328               BIT(FLOW_DISSECTOR_KEY_VLAN) |
7329               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7330               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7331               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7332                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7333                         dissector->used_keys);
7334                 return -EOPNOTSUPP;
7335         }
7336
7337         hclge_get_cls_key_basic(flow, rule);
7338         hclge_get_cls_key_mac(flow, rule);
7339         hclge_get_cls_key_vlan(flow, rule);
7340         hclge_get_cls_key_ip(flow, rule);
7341         hclge_get_cls_key_port(flow, rule);
7342
7343         return 0;
7344 }
7345
7346 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7347                                   struct flow_cls_offload *cls_flower, int tc)
7348 {
7349         u32 prio = cls_flower->common.prio;
7350
7351         if (tc < 0 || tc > hdev->tc_max) {
7352                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7353                 return -EINVAL;
7354         }
7355
7356         if (prio == 0 ||
7357             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7358                 dev_err(&hdev->pdev->dev,
7359                         "prio %u should be in range[1, %u]\n",
7360                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7361                 return -EINVAL;
7362         }
7363
7364         if (test_bit(prio - 1, hdev->fd_bmap)) {
7365                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7366                 return -EINVAL;
7367         }
7368         return 0;
7369 }
7370
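/* Add a tc flower rule offloaded with a "select TC" action. The flower
 * priority maps directly to the TCAM location (prio - 1), so priorities must
 * be unique and within the stage 1 rule space. As an illustration only
 * (assuming mqprio/clsact offload is already configured), such a rule could
 * be installed with something like:
 *   tc filter add dev <ethX> ingress protocol ip flower dst_ip 192.0.2.1 \
 *           skip_sw hw_tc 1
 */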
7371 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7372                                 struct flow_cls_offload *cls_flower,
7373                                 int tc)
7374 {
7375         struct hclge_vport *vport = hclge_get_vport(handle);
7376         struct hclge_dev *hdev = vport->back;
7377         struct hclge_fd_rule *rule;
7378         int ret;
7379
7380         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7381         if (ret) {
7382                 dev_err(&hdev->pdev->dev,
7383                         "failed to check cls flower params, ret = %d\n", ret);
7384                 return ret;
7385         }
7386
7387         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7388         if (!rule)
7389                 return -ENOMEM;
7390
7391         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7392         if (ret) {
7393                 kfree(rule);
7394                 return ret;
7395         }
7396
7397         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7398         rule->cls_flower.tc = tc;
7399         rule->location = cls_flower->common.prio - 1;
7400         rule->vf_id = 0;
7401         rule->cls_flower.cookie = cls_flower->cookie;
7402         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7403
7404         ret = hclge_add_fd_entry_common(hdev, rule);
7405         if (ret)
7406                 kfree(rule);
7407
7408         return ret;
7409 }
7410
7411 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7412                                                    unsigned long cookie)
7413 {
7414         struct hclge_fd_rule *rule;
7415         struct hlist_node *node;
7416
7417         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7418                 if (rule->cls_flower.cookie == cookie)
7419                         return rule;
7420         }
7421
7422         return NULL;
7423 }
7424
7425 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7426                                 struct flow_cls_offload *cls_flower)
7427 {
7428         struct hclge_vport *vport = hclge_get_vport(handle);
7429         struct hclge_dev *hdev = vport->back;
7430         struct hclge_fd_rule *rule;
7431         int ret;
7432
7433         spin_lock_bh(&hdev->fd_rule_lock);
7434
7435         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7436         if (!rule) {
7437                 spin_unlock_bh(&hdev->fd_rule_lock);
7438                 return -EINVAL;
7439         }
7440
7441         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7442                                    NULL, false);
7443         if (ret) {
7444                 spin_unlock_bh(&hdev->fd_rule_lock);
7445                 return ret;
7446         }
7447
7448         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7449         spin_unlock_bh(&hdev->fd_rule_lock);
7450
7451         return 0;
7452 }
7453
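/* Flush pending flow director changes to hardware. Rules in TO_ADD state are
 * programmed and become ACTIVE; rules in TO_DEL state are removed from the
 * stage 1 TCAM and freed. On any failure the TBL_CHANGED bit is set again so
 * the periodic service task retries later.
 */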
7454 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7455 {
7456         struct hclge_fd_rule *rule;
7457         struct hlist_node *node;
7458         int ret = 0;
7459
7460         if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7461                 return;
7462
7463         spin_lock_bh(&hdev->fd_rule_lock);
7464
7465         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7466                 switch (rule->state) {
7467                 case HCLGE_FD_TO_ADD:
7468                         ret = hclge_fd_config_rule(hdev, rule);
7469                         if (ret)
7470                                 goto out;
7471                         rule->state = HCLGE_FD_ACTIVE;
7472                         break;
7473                 case HCLGE_FD_TO_DEL:
7474                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7475                                                    rule->location, NULL, false);
7476                         if (ret)
7477                                 goto out;
7478                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7479                         hclge_fd_free_node(hdev, rule);
7480                         break;
7481                 default:
7482                         break;
7483                 }
7484         }
7485
7486 out:
7487         if (ret)
7488                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7489
7490         spin_unlock_bh(&hdev->fd_rule_lock);
7491 }
7492
7493 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7494 {
7495         if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7496                 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7497
7498                 hclge_clear_fd_rules_in_list(hdev, clear_list);
7499         }
7500
7501         hclge_sync_fd_user_def_cfg(hdev, false);
7502
7503         hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7504 }
7505
7506 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7507 {
7508         struct hclge_vport *vport = hclge_get_vport(handle);
7509         struct hclge_dev *hdev = vport->back;
7510
7511         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7512                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7513 }
7514
7515 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7516 {
7517         struct hclge_vport *vport = hclge_get_vport(handle);
7518         struct hclge_dev *hdev = vport->back;
7519
7520         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7521 }
7522
7523 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7524 {
7525         struct hclge_vport *vport = hclge_get_vport(handle);
7526         struct hclge_dev *hdev = vport->back;
7527
7528         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7529 }
7530
7531 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7532 {
7533         struct hclge_vport *vport = hclge_get_vport(handle);
7534         struct hclge_dev *hdev = vport->back;
7535
7536         return hdev->rst_stats.hw_reset_done_cnt;
7537 }
7538
7539 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7540 {
7541         struct hclge_vport *vport = hclge_get_vport(handle);
7542         struct hclge_dev *hdev = vport->back;
7543
7544         hdev->fd_en = enable;
7545
7546         if (!enable)
7547                 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7548         else
7549                 hclge_restore_fd_entries(handle);
7550
7551         hclge_task_schedule(hdev, 0);
7552 }
7553
7554 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7555 {
7556         struct hclge_desc desc;
7557         struct hclge_config_mac_mode_cmd *req =
7558                 (struct hclge_config_mac_mode_cmd *)desc.data;
7559         u32 loop_en = 0;
7560         int ret;
7561
7562         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7563
7564         if (enable) {
7565                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7566                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7567                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7568                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7569                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7570                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7571                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7572                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7573                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7574                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7575         }
7576
7577         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7578
7579         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7580         if (ret)
7581                 dev_err(&hdev->pdev->dev,
7582                         "mac enable fail, ret =%d.\n", ret);
7583 }
7584
7585 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7586                                      u8 switch_param, u8 param_mask)
7587 {
7588         struct hclge_mac_vlan_switch_cmd *req;
7589         struct hclge_desc desc;
7590         u32 func_id;
7591         int ret;
7592
7593         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7594         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7595
7596         /* read current config parameter */
7597         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7598                                    true);
7599         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7600         req->func_id = cpu_to_le32(func_id);
7601
7602         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7603         if (ret) {
7604                 dev_err(&hdev->pdev->dev,
7605                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7606                 return ret;
7607         }
7608
7609         /* modify and write new config parameter */
7610         hclge_cmd_reuse_desc(&desc, false);
7611         req->switch_param = (req->switch_param & param_mask) | switch_param;
7612         req->param_mask = param_mask;
7613
7614         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7615         if (ret)
7616                 dev_err(&hdev->pdev->dev,
7617                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7618         return ret;
7619 }
7620
7621 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7622                                        int link_ret)
7623 {
7624 #define HCLGE_PHY_LINK_STATUS_NUM  200
7625
7626         struct phy_device *phydev = hdev->hw.mac.phydev;
7627         int i = 0;
7628         int ret;
7629
7630         do {
7631                 ret = phy_read_status(phydev);
7632                 if (ret) {
7633                         dev_err(&hdev->pdev->dev,
7634                                 "phy update link status fail, ret = %d\n", ret);
7635                         return;
7636                 }
7637
7638                 if (phydev->link == link_ret)
7639                         break;
7640
7641                 msleep(HCLGE_LINK_STATUS_MS);
7642         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7643 }
7644
7645 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7646 {
7647 #define HCLGE_MAC_LINK_STATUS_NUM  100
7648
7649         int link_status;
7650         int i = 0;
7651         int ret;
7652
7653         do {
7654                 ret = hclge_get_mac_link_status(hdev, &link_status);
7655                 if (ret)
7656                         return ret;
7657                 if (link_status == link_ret)
7658                         return 0;
7659
7660                 msleep(HCLGE_LINK_STATUS_MS);
7661         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7662         return -EBUSY;
7663 }
7664
7665 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7666                                           bool is_phy)
7667 {
7668         int link_ret;
7669
7670         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7671
7672         if (is_phy)
7673                 hclge_phy_link_status_wait(hdev, link_ret);
7674
7675         return hclge_mac_link_status_wait(hdev, link_ret);
7676 }
7677
7678 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7679 {
7680         struct hclge_config_mac_mode_cmd *req;
7681         struct hclge_desc desc;
7682         u32 loop_en;
7683         int ret;
7684
7685         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7686         /* 1 Read out the MAC mode config at first */
7687         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7688         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7689         if (ret) {
7690                 dev_err(&hdev->pdev->dev,
7691                         "mac loopback get fail, ret =%d.\n", ret);
7692                 return ret;
7693         }
7694
7695         /* 2 Then setup the loopback flag */
7696         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7697         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7698
7699         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7700
7701         /* 3 Config mac work mode with loopback flag
7702          * and its original configuration parameters
7703          */
7704         hclge_cmd_reuse_desc(&desc, false);
7705         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7706         if (ret)
7707                 dev_err(&hdev->pdev->dev,
7708                         "mac loopback set fail, ret =%d.\n", ret);
7709         return ret;
7710 }
7711
7712 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7713                                      enum hnae3_loop loop_mode)
7714 {
7715 #define HCLGE_COMMON_LB_RETRY_MS        10
7716 #define HCLGE_COMMON_LB_RETRY_NUM       100
7717
7718         struct hclge_common_lb_cmd *req;
7719         struct hclge_desc desc;
7720         int ret, i = 0;
7721         u8 loop_mode_b;
7722
7723         req = (struct hclge_common_lb_cmd *)desc.data;
7724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7725
7726         switch (loop_mode) {
7727         case HNAE3_LOOP_SERIAL_SERDES:
7728                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7729                 break;
7730         case HNAE3_LOOP_PARALLEL_SERDES:
7731                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7732                 break;
7733         case HNAE3_LOOP_PHY:
7734                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7735                 break;
7736         default:
7737                 dev_err(&hdev->pdev->dev,
7738                         "unsupported common loopback mode %d\n", loop_mode);
7739                 return -ENOTSUPP;
7740         }
7741
7742         if (en) {
7743                 req->enable = loop_mode_b;
7744                 req->mask = loop_mode_b;
7745         } else {
7746                 req->mask = loop_mode_b;
7747         }
7748
7749         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7750         if (ret) {
7751                 dev_err(&hdev->pdev->dev,
7752                         "common loopback set fail, ret = %d\n", ret);
7753                 return ret;
7754         }
7755
7756         do {
7757                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7758                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7759                                            true);
7760                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7761                 if (ret) {
7762                         dev_err(&hdev->pdev->dev,
7763                                 "common loopback get fail, ret = %d\n");
7764                         return ret;
7765                 }
7766         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7767                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7768
7769         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7770                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7771                 return -EBUSY;
7772         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7773                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7774                 return -EIO;
7775         }
7776         return ret;
7777 }
7778
7779 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7780                                      enum hnae3_loop loop_mode)
7781 {
7782         int ret;
7783
7784         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7785         if (ret)
7786                 return ret;
7787
7788         hclge_cfg_mac_mode(hdev, en);
7789
7790         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7791         if (ret)
7792                 dev_err(&hdev->pdev->dev,
7793                         "serdes loopback config mac mode timeout\n");
7794
7795         return ret;
7796 }
7797
7798 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7799                                      struct phy_device *phydev)
7800 {
7801         int ret;
7802
7803         if (!phydev->suspended) {
7804                 ret = phy_suspend(phydev);
7805                 if (ret)
7806                         return ret;
7807         }
7808
7809         ret = phy_resume(phydev);
7810         if (ret)
7811                 return ret;
7812
7813         return phy_loopback(phydev, true);
7814 }
7815
7816 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7817                                       struct phy_device *phydev)
7818 {
7819         int ret;
7820
7821         ret = phy_loopback(phydev, false);
7822         if (ret)
7823                 return ret;
7824
7825         return phy_suspend(phydev);
7826 }
7827
7828 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7829 {
7830         struct phy_device *phydev = hdev->hw.mac.phydev;
7831         int ret;
7832
7833         if (!phydev) {
7834                 if (hnae3_dev_phy_imp_supported(hdev))
7835                         return hclge_set_common_loopback(hdev, en,
7836                                                          HNAE3_LOOP_PHY);
7837                 return -ENOTSUPP;
7838         }
7839
7840         if (en)
7841                 ret = hclge_enable_phy_loopback(hdev, phydev);
7842         else
7843                 ret = hclge_disable_phy_loopback(hdev, phydev);
7844         if (ret) {
7845                 dev_err(&hdev->pdev->dev,
7846                         "set phy loopback fail, ret = %d\n", ret);
7847                 return ret;
7848         }
7849
7850         hclge_cfg_mac_mode(hdev, en);
7851
7852         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7853         if (ret)
7854                 dev_err(&hdev->pdev->dev,
7855                         "phy loopback config mac mode timeout\n");
7856
7857         return ret;
7858 }
7859
7860 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7861                                      u16 stream_id, bool enable)
7862 {
7863         struct hclge_desc desc;
7864         struct hclge_cfg_com_tqp_queue_cmd *req =
7865                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7866
7867         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7868         req->tqp_id = cpu_to_le16(tqp_id);
7869         req->stream_id = cpu_to_le16(stream_id);
7870         if (enable)
7871                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7872
7873         return hclge_cmd_send(&hdev->hw, &desc, 1);
7874 }
7875
7876 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7877 {
7878         struct hclge_vport *vport = hclge_get_vport(handle);
7879         struct hclge_dev *hdev = vport->back;
7880         int ret;
7881         u16 i;
7882
7883         for (i = 0; i < handle->kinfo.num_tqps; i++) {
7884                 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7885                 if (ret)
7886                         return ret;
7887         }
7888         return 0;
7889 }
7890
7891 static int hclge_set_loopback(struct hnae3_handle *handle,
7892                               enum hnae3_loop loop_mode, bool en)
7893 {
7894         struct hclge_vport *vport = hclge_get_vport(handle);
7895         struct hclge_dev *hdev = vport->back;
7896         int ret;
7897
7898         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7899          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7900          * the same, the packets are looped back in the SSU. If SSU loopback
7901          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7902          */
7903         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7904                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7905
7906                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7907                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7908                 if (ret)
7909                         return ret;
7910         }
7911
7912         switch (loop_mode) {
7913         case HNAE3_LOOP_APP:
7914                 ret = hclge_set_app_loopback(hdev, en);
7915                 break;
7916         case HNAE3_LOOP_SERIAL_SERDES:
7917         case HNAE3_LOOP_PARALLEL_SERDES:
7918                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7919                 break;
7920         case HNAE3_LOOP_PHY:
7921                 ret = hclge_set_phy_loopback(hdev, en);
7922                 break;
7923         default:
7924                 ret = -ENOTSUPP;
7925                 dev_err(&hdev->pdev->dev,
7926                         "loop_mode %d is not supported\n", loop_mode);
7927                 break;
7928         }
7929
7930         if (ret)
7931                 return ret;
7932
7933         ret = hclge_tqp_enable(handle, en);
7934         if (ret)
7935                 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7936                         en ? "enable" : "disable", ret);
7937
7938         return ret;
7939 }
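/* The loopback modes above back the ethtool self-test path (e.g. "ethtool -t
 * <ethX>"), which is expected to enable a mode, send test frames and disable
 * it again. hclge_set_default_loopback() below makes sure the app and SerDes
 * loopbacks start out disabled.
 */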
7940
7941 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7942 {
7943         int ret;
7944
7945         ret = hclge_set_app_loopback(hdev, false);
7946         if (ret)
7947                 return ret;
7948
7949         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7950         if (ret)
7951                 return ret;
7952
7953         return hclge_cfg_common_loopback(hdev, false,
7954                                          HNAE3_LOOP_PARALLEL_SERDES);
7955 }
7956
7957 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7958 {
7959         struct hclge_vport *vport = hclge_get_vport(handle);
7960         struct hnae3_knic_private_info *kinfo;
7961         struct hnae3_queue *queue;
7962         struct hclge_tqp *tqp;
7963         int i;
7964
7965         kinfo = &vport->nic.kinfo;
7966         for (i = 0; i < kinfo->num_tqps; i++) {
7967                 queue = handle->kinfo.tqp[i];
7968                 tqp = container_of(queue, struct hclge_tqp, q);
7969                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7970         }
7971 }
7972
7973 static void hclge_flush_link_update(struct hclge_dev *hdev)
7974 {
7975 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7976
7977         unsigned long last = hdev->serv_processed_cnt;
7978         int i = 0;
7979
7980         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7981                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7982                last == hdev->serv_processed_cnt)
7983                 usleep_range(1, 1);
7984 }
7985
7986 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7987 {
7988         struct hclge_vport *vport = hclge_get_vport(handle);
7989         struct hclge_dev *hdev = vport->back;
7990
7991         if (enable) {
7992                 hclge_task_schedule(hdev, 0);
7993         } else {
7994                 /* Set the DOWN flag here to disable link updating */
7995                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7996
7997                 /* flush memory to make sure DOWN is seen by service task */
7998                 smp_mb__before_atomic();
7999                 hclge_flush_link_update(hdev);
8000         }
8001 }
8002
8003 static int hclge_ae_start(struct hnae3_handle *handle)
8004 {
8005         struct hclge_vport *vport = hclge_get_vport(handle);
8006         struct hclge_dev *hdev = vport->back;
8007
8008         /* mac enable */
8009         hclge_cfg_mac_mode(hdev, true);
8010         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8011         hdev->hw.mac.link = 0;
8012
8013         /* reset tqp stats */
8014         hclge_reset_tqp_stats(handle);
8015
8016         hclge_mac_start_phy(hdev);
8017
8018         return 0;
8019 }
8020
8021 static void hclge_ae_stop(struct hnae3_handle *handle)
8022 {
8023         struct hclge_vport *vport = hclge_get_vport(handle);
8024         struct hclge_dev *hdev = vport->back;
8025
8026         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8027         spin_lock_bh(&hdev->fd_rule_lock);
8028         hclge_clear_arfs_rules(hdev);
8029         spin_unlock_bh(&hdev->fd_rule_lock);
8030
8031         /* If it is not PF reset, the firmware will disable the MAC,
8032          * so it only needs to stop the phy here.
8033          */
8034         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8035             hdev->reset_type != HNAE3_FUNC_RESET) {
8036                 hclge_mac_stop_phy(hdev);
8037                 hclge_update_link_status(hdev);
8038                 return;
8039         }
8040
8041         hclge_reset_tqp(handle);
8042
8043         hclge_config_mac_tnl_int(hdev, false);
8044
8045         /* Mac disable */
8046         hclge_cfg_mac_mode(hdev, false);
8047
8048         hclge_mac_stop_phy(hdev);
8049
8050         /* reset tqp stats */
8051         hclge_reset_tqp_stats(handle);
8052         hclge_update_link_status(hdev);
8053 }
8054
8055 int hclge_vport_start(struct hclge_vport *vport)
8056 {
8057         struct hclge_dev *hdev = vport->back;
8058
8059         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8060         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8061         vport->last_active_jiffies = jiffies;
8062
8063         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8064                 if (vport->vport_id) {
8065                         hclge_restore_mac_table_common(vport);
8066                         hclge_restore_vport_vlan_table(vport);
8067                 } else {
8068                         hclge_restore_hw_table(hdev);
8069                 }
8070         }
8071
8072         clear_bit(vport->vport_id, hdev->vport_config_block);
8073
8074         return 0;
8075 }
8076
8077 void hclge_vport_stop(struct hclge_vport *vport)
8078 {
8079         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8080 }
8081
8082 static int hclge_client_start(struct hnae3_handle *handle)
8083 {
8084         struct hclge_vport *vport = hclge_get_vport(handle);
8085
8086         return hclge_vport_start(vport);
8087 }
8088
8089 static void hclge_client_stop(struct hnae3_handle *handle)
8090 {
8091         struct hclge_vport *vport = hclge_get_vport(handle);
8092
8093         hclge_vport_stop(vport);
8094 }
8095
8096 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8097                                          u16 cmdq_resp, u8  resp_code,
8098                                          enum hclge_mac_vlan_tbl_opcode op)
8099 {
8100         struct hclge_dev *hdev = vport->back;
8101
8102         if (cmdq_resp) {
8103                 dev_err(&hdev->pdev->dev,
8104                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8105                         cmdq_resp);
8106                 return -EIO;
8107         }
8108
8109         if (op == HCLGE_MAC_VLAN_ADD) {
8110                 if (!resp_code || resp_code == 1)
8111                         return 0;
8112                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8113                          resp_code == HCLGE_ADD_MC_OVERFLOW)
8114                         return -ENOSPC;
8115
8116                 dev_err(&hdev->pdev->dev,
8117                         "add mac addr failed for undefined, code=%u.\n",
8118                         resp_code);
8119                 return -EIO;
8120         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8121                 if (!resp_code) {
8122                         return 0;
8123                 } else if (resp_code == 1) {
8124                         dev_dbg(&hdev->pdev->dev,
8125                                 "remove mac addr failed for miss.\n");
8126                         return -ENOENT;
8127                 }
8128
8129                 dev_err(&hdev->pdev->dev,
8130                         "remove mac addr failed for undefined, code=%u.\n",
8131                         resp_code);
8132                 return -EIO;
8133         } else if (op == HCLGE_MAC_VLAN_LKUP) {
8134                 if (!resp_code) {
8135                         return 0;
8136                 } else if (resp_code == 1) {
8137                         dev_dbg(&hdev->pdev->dev,
8138                                 "lookup mac addr failed for miss.\n");
8139                         return -ENOENT;
8140                 }
8141
8142                 dev_err(&hdev->pdev->dev,
8143                         "lookup mac addr failed for undefined, code=%u.\n",
8144                         resp_code);
8145                 return -EIO;
8146         }
8147
8148         dev_err(&hdev->pdev->dev,
8149                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8150
8151         return -EINVAL;
8152 }
8153
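/* A MAC-VLAN table entry carries a 256-bit bitmap of the functions it applies
 * to, split across command descriptors: functions 0..191 live in desc[1] and
 * 192..255 in desc[2]. This helper sets or clears the bit for the given vfid.
 */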
8154 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8155 {
8156 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8157
8158         unsigned int word_num;
8159         unsigned int bit_num;
8160
8161         if (vfid > 255 || vfid < 0)
8162                 return -EIO;
8163
8164         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8165                 word_num = vfid / 32;
8166                 bit_num  = vfid % 32;
8167                 if (clr)
8168                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8169                 else
8170                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8171         } else {
8172                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8173                 bit_num  = vfid % 32;
8174                 if (clr)
8175                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8176                 else
8177                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8178         }
8179
8180         return 0;
8181 }
8182
8183 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8184 {
8185 #define HCLGE_DESC_NUMBER 3
8186 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8187         int i, j;
8188
8189         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8190                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8191                         if (desc[i].data[j])
8192                                 return false;
8193
8194         return true;
8195 }
8196
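/* Pack a MAC address into the table entry layout: bytes 0-3 go into
 * mac_addr_hi32 (byte 0 in the least significant byte) and bytes 4-5 into
 * mac_addr_lo16. For multicast entries the entry type and mc enable bits are
 * set as well.
 */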
8197 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8198                                    const u8 *addr, bool is_mc)
8199 {
8200         const unsigned char *mac_addr = addr;
8201         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8202                        (mac_addr[0]) | (mac_addr[1] << 8);
8203         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8204
8205         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8206         if (is_mc) {
8207                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8208                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8209         }
8210
8211         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8212         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8213 }
8214
8215 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8216                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8217 {
8218         struct hclge_dev *hdev = vport->back;
8219         struct hclge_desc desc;
8220         u8 resp_code;
8221         u16 retval;
8222         int ret;
8223
8224         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8225
8226         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8227
8228         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8229         if (ret) {
8230                 dev_err(&hdev->pdev->dev,
8231                         "del mac addr failed for cmd_send, ret =%d.\n",
8232                         ret);
8233                 return ret;
8234         }
8235         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8236         retval = le16_to_cpu(desc.retval);
8237
8238         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8239                                              HCLGE_MAC_VLAN_REMOVE);
8240 }
8241
8242 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8243                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8244                                      struct hclge_desc *desc,
8245                                      bool is_mc)
8246 {
8247         struct hclge_dev *hdev = vport->back;
8248         u8 resp_code;
8249         u16 retval;
8250         int ret;
8251
8252         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8253         if (is_mc) {
8254                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8255                 memcpy(desc[0].data,
8256                        req,
8257                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8258                 hclge_cmd_setup_basic_desc(&desc[1],
8259                                            HCLGE_OPC_MAC_VLAN_ADD,
8260                                            true);
8261                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8262                 hclge_cmd_setup_basic_desc(&desc[2],
8263                                            HCLGE_OPC_MAC_VLAN_ADD,
8264                                            true);
8265                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8266         } else {
8267                 memcpy(desc[0].data,
8268                        req,
8269                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8270                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8271         }
8272         if (ret) {
8273                 dev_err(&hdev->pdev->dev,
8274                         "lookup mac addr failed for cmd_send, ret =%d.\n",
8275                         ret);
8276                 return ret;
8277         }
8278         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8279         retval = le16_to_cpu(desc[0].retval);
8280
8281         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8282                                              HCLGE_MAC_VLAN_LKUP);
8283 }
8284
8285 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8286                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8287                                   struct hclge_desc *mc_desc)
8288 {
8289         struct hclge_dev *hdev = vport->back;
8290         int cfg_status;
8291         u8 resp_code;
8292         u16 retval;
8293         int ret;
8294
8295         if (!mc_desc) {
8296                 struct hclge_desc desc;
8297
8298                 hclge_cmd_setup_basic_desc(&desc,
8299                                            HCLGE_OPC_MAC_VLAN_ADD,
8300                                            false);
8301                 memcpy(desc.data, req,
8302                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8303                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8304                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8305                 retval = le16_to_cpu(desc.retval);
8306
8307                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8308                                                            resp_code,
8309                                                            HCLGE_MAC_VLAN_ADD);
8310         } else {
8311                 hclge_cmd_reuse_desc(&mc_desc[0], false);
8312                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8313                 hclge_cmd_reuse_desc(&mc_desc[1], false);
8314                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8315                 hclge_cmd_reuse_desc(&mc_desc[2], false);
8316                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8317                 memcpy(mc_desc[0].data, req,
8318                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8319                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8320                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8321                 retval = le16_to_cpu(mc_desc[0].retval);
8322
8323                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8324                                                            resp_code,
8325                                                            HCLGE_MAC_VLAN_ADD);
8326         }
8327
8328         if (ret) {
8329                 dev_err(&hdev->pdev->dev,
8330                         "add mac addr failed for cmd_send, ret =%d.\n",
8331                         ret);
8332                 return ret;
8333         }
8334
8335         return cfg_status;
8336 }
8337
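/* Ask firmware for @space_size unicast MAC VLAN (UMV) table entries; the
 * number of entries actually granted is returned through @allocated_size.
 */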
8338 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8339                                u16 *allocated_size)
8340 {
8341         struct hclge_umv_spc_alc_cmd *req;
8342         struct hclge_desc desc;
8343         int ret;
8344
8345         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8346         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8347
8348         req->space_size = cpu_to_le32(space_size);
8349
8350         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8351         if (ret) {
8352                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8353                         ret);
8354                 return ret;
8355         }
8356
8357         *allocated_size = le32_to_cpu(desc.data[1]);
8358
8359         return 0;
8360 }
8361
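/* Allocate the UMV space and divide the granted entries into per-vport
 * private quotas plus a shared pool (one extra quota plus the division
 * remainder).
 */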
8362 static int hclge_init_umv_space(struct hclge_dev *hdev)
8363 {
8364         u16 allocated_size = 0;
8365         int ret;
8366
8367         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8368         if (ret)
8369                 return ret;
8370
8371         if (allocated_size < hdev->wanted_umv_size)
8372                 dev_warn(&hdev->pdev->dev,
8373                          "failed to alloc umv space, want %u, get %u\n",
8374                          hdev->wanted_umv_size, allocated_size);
8375
8376         hdev->max_umv_size = allocated_size;
8377         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8378         hdev->share_umv_size = hdev->priv_umv_size +
8379                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8380
8381         return 0;
8382 }
8383
8384 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8385 {
8386         struct hclge_vport *vport;
8387         int i;
8388
8389         for (i = 0; i < hdev->num_alloc_vport; i++) {
8390                 vport = &hdev->vport[i];
8391                 vport->used_umv_num = 0;
8392         }
8393
8394         mutex_lock(&hdev->vport_lock);
8395         hdev->share_umv_size = hdev->priv_umv_size +
8396                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8397         mutex_unlock(&hdev->vport_lock);
8398 }
8399
8400 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8401 {
8402         struct hclge_dev *hdev = vport->back;
8403         bool is_full;
8404
8405         if (need_lock)
8406                 mutex_lock(&hdev->vport_lock);
8407
8408         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8409                    hdev->share_umv_size == 0);
8410
8411         if (need_lock)
8412                 mutex_unlock(&hdev->vport_lock);
8413
8414         return is_full;
8415 }
8416
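/* Account for one UMV entry being added or freed: once a vport has used up
 * its private quota it consumes from the shared pool, and the shared pool
 * is credited back when such an entry is freed.
 */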
8417 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8418 {
8419         struct hclge_dev *hdev = vport->back;
8420
8421         if (is_free) {
8422                 if (vport->used_umv_num > hdev->priv_umv_size)
8423                         hdev->share_umv_size++;
8424
8425                 if (vport->used_umv_num > 0)
8426                         vport->used_umv_num--;
8427         } else {
8428                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8429                     hdev->share_umv_size > 0)
8430                         hdev->share_umv_size--;
8431                 vport->used_umv_num++;
8432         }
8433 }
8434
8435 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8436                                                   const u8 *mac_addr)
8437 {
8438         struct hclge_mac_node *mac_node, *tmp;
8439
8440         list_for_each_entry_safe(mac_node, tmp, list, node)
8441                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8442                         return mac_node;
8443
8444         return NULL;
8445 }
8446
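/* Merge a newly requested state into an existing mac node: a delete request
 * cancels a pending add, an add request on a pending delete restores the
 * node to ACTIVE, and an ACTIVE report confirms a pending add.
 */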
8447 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8448                                   enum HCLGE_MAC_NODE_STATE state)
8449 {
8450         switch (state) {
8451         /* from set_rx_mode or tmp_add_list */
8452         case HCLGE_MAC_TO_ADD:
8453                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8454                         mac_node->state = HCLGE_MAC_ACTIVE;
8455                 break;
8456         /* only from set_rx_mode */
8457         case HCLGE_MAC_TO_DEL:
8458                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8459                         list_del(&mac_node->node);
8460                         kfree(mac_node);
8461                 } else {
8462                         mac_node->state = HCLGE_MAC_TO_DEL;
8463                 }
8464                 break;
8465         /* only from tmp_add_list; in this case the mac_node->state
8466          * cannot be ACTIVE.
8467          */
8468         case HCLGE_MAC_ACTIVE:
8469                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8470                         mac_node->state = HCLGE_MAC_ACTIVE;
8471
8472                 break;
8473         }
8474 }
8475
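/* Record a unicast/multicast address change request in the vport's mac
 * list; the actual hardware update is done later by the mac table sync
 * task (hclge_sync_mac_table).
 */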
8476 int hclge_update_mac_list(struct hclge_vport *vport,
8477                           enum HCLGE_MAC_NODE_STATE state,
8478                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8479                           const unsigned char *addr)
8480 {
8481         struct hclge_dev *hdev = vport->back;
8482         struct hclge_mac_node *mac_node;
8483         struct list_head *list;
8484
8485         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8486                 &vport->uc_mac_list : &vport->mc_mac_list;
8487
8488         spin_lock_bh(&vport->mac_list_lock);
8489
8490         /* if the mac addr is already in the mac list, there is no need to
8491          * add a new node; just update the existing node's state: convert it
8492          * to a new state, remove it, or leave it unchanged.
8493          */
8494         mac_node = hclge_find_mac_node(list, addr);
8495         if (mac_node) {
8496                 hclge_update_mac_node(mac_node, state);
8497                 spin_unlock_bh(&vport->mac_list_lock);
8498                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8499                 return 0;
8500         }
8501
8502         /* if this address was never added, there is nothing to delete */
8503         if (state == HCLGE_MAC_TO_DEL) {
8504                 spin_unlock_bh(&vport->mac_list_lock);
8505                 dev_err(&hdev->pdev->dev,
8506                         "failed to delete address %pM from mac list\n",
8507                         addr);
8508                 return -ENOENT;
8509         }
8510
8511         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8512         if (!mac_node) {
8513                 spin_unlock_bh(&vport->mac_list_lock);
8514                 return -ENOMEM;
8515         }
8516
8517         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8518
8519         mac_node->state = state;
8520         ether_addr_copy(mac_node->mac_addr, addr);
8521         list_add_tail(&mac_node->node, list);
8522
8523         spin_unlock_bh(&vport->mac_list_lock);
8524
8525         return 0;
8526 }
8527
8528 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8529                              const unsigned char *addr)
8530 {
8531         struct hclge_vport *vport = hclge_get_vport(handle);
8532
8533         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8534                                      addr);
8535 }
8536
8537 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8538                              const unsigned char *addr)
8539 {
8540         struct hclge_dev *hdev = vport->back;
8541         struct hclge_mac_vlan_tbl_entry_cmd req;
8542         struct hclge_desc desc;
8543         u16 egress_port = 0;
8544         int ret;
8545
8546         /* mac addr check */
8547         if (is_zero_ether_addr(addr) ||
8548             is_broadcast_ether_addr(addr) ||
8549             is_multicast_ether_addr(addr)) {
8550                 dev_err(&hdev->pdev->dev,
8551                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8552                          addr, is_zero_ether_addr(addr),
8553                          is_broadcast_ether_addr(addr),
8554                          is_multicast_ether_addr(addr));
8555                 return -EINVAL;
8556         }
8557
8558         memset(&req, 0, sizeof(req));
8559
8560         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8561                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8562
8563         req.egress_port = cpu_to_le16(egress_port);
8564
8565         hclge_prepare_mac_addr(&req, addr, false);
8566
8567         /* Look up the mac address in the mac_vlan table, and add
8568          * it if the entry does not exist. Duplicate unicast entries
8569          * are not allowed in the mac_vlan table.
8570          */
8571         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8572         if (ret == -ENOENT) {
8573                 mutex_lock(&hdev->vport_lock);
8574                 if (!hclge_is_umv_space_full(vport, false)) {
8575                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8576                         if (!ret)
8577                                 hclge_update_umv_space(vport, false);
8578                         mutex_unlock(&hdev->vport_lock);
8579                         return ret;
8580                 }
8581                 mutex_unlock(&hdev->vport_lock);
8582
8583                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8584                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8585                                 hdev->priv_umv_size);
8586
8587                 return -ENOSPC;
8588         }
8589
8590         /* check if we just hit a duplicate entry */
8591         if (!ret) {
8592                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8593                          vport->vport_id, addr);
8594                 return 0;
8595         }
8596
8597         dev_err(&hdev->pdev->dev,
8598                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8599                 addr);
8600
8601         return ret;
8602 }
8603
8604 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8605                             const unsigned char *addr)
8606 {
8607         struct hclge_vport *vport = hclge_get_vport(handle);
8608
8609         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8610                                      addr);
8611 }
8612
8613 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8614                             const unsigned char *addr)
8615 {
8616         struct hclge_dev *hdev = vport->back;
8617         struct hclge_mac_vlan_tbl_entry_cmd req;
8618         int ret;
8619
8620         /* mac addr check */
8621         if (is_zero_ether_addr(addr) ||
8622             is_broadcast_ether_addr(addr) ||
8623             is_multicast_ether_addr(addr)) {
8624                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8625                         addr);
8626                 return -EINVAL;
8627         }
8628
8629         memset(&req, 0, sizeof(req));
8630         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8631         hclge_prepare_mac_addr(&req, addr, false);
8632         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8633         if (!ret) {
8634                 mutex_lock(&hdev->vport_lock);
8635                 hclge_update_umv_space(vport, true);
8636                 mutex_unlock(&hdev->vport_lock);
8637         } else if (ret == -ENOENT) {
8638                 ret = 0;
8639         }
8640
8641         return ret;
8642 }
8643
8644 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8645                              const unsigned char *addr)
8646 {
8647         struct hclge_vport *vport = hclge_get_vport(handle);
8648
8649         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8650                                      addr);
8651 }
8652
8653 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8654                              const unsigned char *addr)
8655 {
8656         struct hclge_dev *hdev = vport->back;
8657         struct hclge_mac_vlan_tbl_entry_cmd req;
8658         struct hclge_desc desc[3];
8659         int status;
8660
8661         /* mac addr check */
8662         if (!is_multicast_ether_addr(addr)) {
8663                 dev_err(&hdev->pdev->dev,
8664                         "Add mc mac err! invalid mac:%pM.\n",
8665                          addr);
8666                 return -EINVAL;
8667         }
8668         memset(&req, 0, sizeof(req));
8669         hclge_prepare_mac_addr(&req, addr, true);
8670         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8671         if (status) {
8672                 /* This mac addr does not exist, add a new entry for it */
8673                 memset(desc[0].data, 0, sizeof(desc[0].data));
8674                 memset(desc[1].data, 0, sizeof(desc[0].data));
8675                 memset(desc[2].data, 0, sizeof(desc[0].data));
8676         }
8677         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8678         if (status)
8679                 return status;
8680         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8681         /* if the table has already overflowed, do not print it each time */
8682         if (status == -ENOSPC &&
8683             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8684                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8685
8686         return status;
8687 }
8688
8689 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8690                             const unsigned char *addr)
8691 {
8692         struct hclge_vport *vport = hclge_get_vport(handle);
8693
8694         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8695                                      addr);
8696 }
8697
8698 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8699                             const unsigned char *addr)
8700 {
8701         struct hclge_dev *hdev = vport->back;
8702         struct hclge_mac_vlan_tbl_entry_cmd req;
8703         enum hclge_cmd_status status;
8704         struct hclge_desc desc[3];
8705
8706         /* mac addr check */
8707         if (!is_multicast_ether_addr(addr)) {
8708                 dev_dbg(&hdev->pdev->dev,
8709                         "Remove mc mac err! invalid mac:%pM.\n",
8710                          addr);
8711                 return -EINVAL;
8712         }
8713
8714         memset(&req, 0, sizeof(req));
8715         hclge_prepare_mac_addr(&req, addr, true);
8716         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8717         if (!status) {
8718                 /* This mac addr exists, remove this handle's VFID from it */
8719                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8720                 if (status)
8721                         return status;
8722
8723                 if (hclge_is_all_function_id_zero(desc))
8724                         /* All the vfids are zero, so delete this entry */
8725                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8726                 else
8727                         /* Not all the vfids are zero, just update the vfid bitmap */
8728                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8729         } else if (status == -ENOENT) {
8730                 status = 0;
8731         }
8732
8733         return status;
8734 }
8735
8736 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8737                                       struct list_head *list,
8738                                       int (*sync)(struct hclge_vport *,
8739                                                   const unsigned char *))
8740 {
8741         struct hclge_mac_node *mac_node, *tmp;
8742         int ret;
8743
8744         list_for_each_entry_safe(mac_node, tmp, list, node) {
8745                 ret = sync(vport, mac_node->mac_addr);
8746                 if (!ret) {
8747                         mac_node->state = HCLGE_MAC_ACTIVE;
8748                 } else {
8749                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8750                                 &vport->state);
8751                         break;
8752                 }
8753         }
8754 }
8755
8756 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8757                                         struct list_head *list,
8758                                         int (*unsync)(struct hclge_vport *,
8759                                                       const unsigned char *))
8760 {
8761         struct hclge_mac_node *mac_node, *tmp;
8762         int ret;
8763
8764         list_for_each_entry_safe(mac_node, tmp, list, node) {
8765                 ret = unsync(vport, mac_node->mac_addr);
8766                 if (!ret || ret == -ENOENT) {
8767                         list_del(&mac_node->node);
8768                         kfree(mac_node);
8769                 } else {
8770                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8771                                 &vport->state);
8772                         break;
8773                 }
8774         }
8775 }
8776
8777 static bool hclge_sync_from_add_list(struct list_head *add_list,
8778                                      struct list_head *mac_list)
8779 {
8780         struct hclge_mac_node *mac_node, *tmp, *new_node;
8781         bool all_added = true;
8782
8783         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8784                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8785                         all_added = false;
8786
8787                 /* if the mac address from tmp_add_list is not in the
8788                  * uc/mc_mac_list, it means a TO_DEL request was received
8789                  * while the mac address was being added to the mac table.
8790                  * if the mac_node state is ACTIVE, change it to TO_DEL so
8791                  * it will be removed next time. else it must be TO_ADD,
8792                  * which means this address has not been added to the mac
8793                  * table yet, so just remove the mac node.
8794                  */
8795                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8796                 if (new_node) {
8797                         hclge_update_mac_node(new_node, mac_node->state);
8798                         list_del(&mac_node->node);
8799                         kfree(mac_node);
8800                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8801                         mac_node->state = HCLGE_MAC_TO_DEL;
8802                         list_del(&mac_node->node);
8803                         list_add_tail(&mac_node->node, mac_list);
8804                 } else {
8805                         list_del(&mac_node->node);
8806                         kfree(mac_node);
8807                 }
8808         }
8809
8810         return all_added;
8811 }
8812
8813 static void hclge_sync_from_del_list(struct list_head *del_list,
8814                                      struct list_head *mac_list)
8815 {
8816         struct hclge_mac_node *mac_node, *tmp, *new_node;
8817
8818         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8819                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8820                 if (new_node) {
8821                         /* If the mac addr exists in the mac list, it means
8822                          * a new TO_ADD request was received while the mac
8823                          * address was being configured. Since the mac node
8824                          * state is TO_ADD and the address is still in the
8825                          * hardware (the delete failed), we just need to
8826                          * change the mac node state to ACTIVE.
8827                          */
8828                         new_node->state = HCLGE_MAC_ACTIVE;
8829                         list_del(&mac_node->node);
8830                         kfree(mac_node);
8831                 } else {
8832                         list_del(&mac_node->node);
8833                         list_add_tail(&mac_node->node, mac_list);
8834                 }
8835         }
8836 }
8837
8838 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8839                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8840                                         bool is_all_added)
8841 {
8842         if (mac_type == HCLGE_MAC_ADDR_UC) {
8843                 if (is_all_added)
8844                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8845                 else
8846                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8847         } else {
8848                 if (is_all_added)
8849                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8850                 else
8851                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8852         }
8853 }
8854
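/* Sync the vport mac list to hardware: snapshot the pending TO_ADD/TO_DEL
 * entries under the lock, apply deletions first to free up table space,
 * then additions, and finally merge the results back into the mac list.
 */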
8855 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8856                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8857 {
8858         struct hclge_mac_node *mac_node, *tmp, *new_node;
8859         struct list_head tmp_add_list, tmp_del_list;
8860         struct list_head *list;
8861         bool all_added;
8862
8863         INIT_LIST_HEAD(&tmp_add_list);
8864         INIT_LIST_HEAD(&tmp_del_list);
8865
8866         /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8867          * they can be added/deleted outside the spin lock
8868          */
8869         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8870                 &vport->uc_mac_list : &vport->mc_mac_list;
8871
8872         spin_lock_bh(&vport->mac_list_lock);
8873
8874         list_for_each_entry_safe(mac_node, tmp, list, node) {
8875                 switch (mac_node->state) {
8876                 case HCLGE_MAC_TO_DEL:
8877                         list_del(&mac_node->node);
8878                         list_add_tail(&mac_node->node, &tmp_del_list);
8879                         break;
8880                 case HCLGE_MAC_TO_ADD:
8881                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8882                         if (!new_node)
8883                                 goto stop_traverse;
8884                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8885                         new_node->state = mac_node->state;
8886                         list_add_tail(&new_node->node, &tmp_add_list);
8887                         break;
8888                 default:
8889                         break;
8890                 }
8891         }
8892
8893 stop_traverse:
8894         spin_unlock_bh(&vport->mac_list_lock);
8895
8896         /* delete first, in order to get max mac table space for adding */
8897         if (mac_type == HCLGE_MAC_ADDR_UC) {
8898                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8899                                             hclge_rm_uc_addr_common);
8900                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8901                                           hclge_add_uc_addr_common);
8902         } else {
8903                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8904                                             hclge_rm_mc_addr_common);
8905                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8906                                           hclge_add_mc_addr_common);
8907         }
8908
8909         /* if adding/deleting some mac addresses failed, move them back
8910          * to the mac_list and retry at the next sync.
8911          */
8912         spin_lock_bh(&vport->mac_list_lock);
8913
8914         hclge_sync_from_del_list(&tmp_del_list, list);
8915         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8916
8917         spin_unlock_bh(&vport->mac_list_lock);
8918
8919         hclge_update_overflow_flags(vport, mac_type, all_added);
8920 }
8921
8922 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8923 {
8924         struct hclge_dev *hdev = vport->back;
8925
8926         if (test_bit(vport->vport_id, hdev->vport_config_block))
8927                 return false;
8928
8929         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8930                 return true;
8931
8932         return false;
8933 }
8934
8935 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8936 {
8937         int i;
8938
8939         for (i = 0; i < hdev->num_alloc_vport; i++) {
8940                 struct hclge_vport *vport = &hdev->vport[i];
8941
8942                 if (!hclge_need_sync_mac_table(vport))
8943                         continue;
8944
8945                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8946                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8947         }
8948 }
8949
8950 static void hclge_build_del_list(struct list_head *list,
8951                                  bool is_del_list,
8952                                  struct list_head *tmp_del_list)
8953 {
8954         struct hclge_mac_node *mac_cfg, *tmp;
8955
8956         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8957                 switch (mac_cfg->state) {
8958                 case HCLGE_MAC_TO_DEL:
8959                 case HCLGE_MAC_ACTIVE:
8960                         list_del(&mac_cfg->node);
8961                         list_add_tail(&mac_cfg->node, tmp_del_list);
8962                         break;
8963                 case HCLGE_MAC_TO_ADD:
8964                         if (is_del_list) {
8965                                 list_del(&mac_cfg->node);
8966                                 kfree(mac_cfg);
8967                         }
8968                         break;
8969                 }
8970         }
8971 }
8972
8973 static void hclge_unsync_del_list(struct hclge_vport *vport,
8974                                   int (*unsync)(struct hclge_vport *vport,
8975                                                 const unsigned char *addr),
8976                                   bool is_del_list,
8977                                   struct list_head *tmp_del_list)
8978 {
8979         struct hclge_mac_node *mac_cfg, *tmp;
8980         int ret;
8981
8982         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8983                 ret = unsync(vport, mac_cfg->mac_addr);
8984                 if (!ret || ret == -ENOENT) {
8985                         /* clear all mac addrs from hardware, but keep
8986                          * these mac addrs in the mac list so they can be
8987                          * restored after the vf reset finishes.
8988                          */
8989                         if (!is_del_list &&
8990                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8991                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8992                         } else {
8993                                 list_del(&mac_cfg->node);
8994                                 kfree(mac_cfg);
8995                         }
8996                 } else if (is_del_list) {
8997                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8998                 }
8999         }
9000 }
9001
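/* Remove all of a vport's uc/mc addresses from hardware. When @is_del_list
 * is false (e.g. for a VF reset), the entries are kept in the mac list as
 * TO_ADD so they can be restored later, and the vport is marked in
 * vport_config_block so the periodic sync leaves it alone meanwhile.
 */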
9002 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9003                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
9004 {
9005         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9006         struct hclge_dev *hdev = vport->back;
9007         struct list_head tmp_del_list, *list;
9008
9009         if (mac_type == HCLGE_MAC_ADDR_UC) {
9010                 list = &vport->uc_mac_list;
9011                 unsync = hclge_rm_uc_addr_common;
9012         } else {
9013                 list = &vport->mc_mac_list;
9014                 unsync = hclge_rm_mc_addr_common;
9015         }
9016
9017         INIT_LIST_HEAD(&tmp_del_list);
9018
9019         if (!is_del_list)
9020                 set_bit(vport->vport_id, hdev->vport_config_block);
9021
9022         spin_lock_bh(&vport->mac_list_lock);
9023
9024         hclge_build_del_list(list, is_del_list, &tmp_del_list);
9025
9026         spin_unlock_bh(&vport->mac_list_lock);
9027
9028         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9029
9030         spin_lock_bh(&vport->mac_list_lock);
9031
9032         hclge_sync_from_del_list(&tmp_del_list, list);
9033
9034         spin_unlock_bh(&vport->mac_list_lock);
9035 }
9036
9037 /* remove all mac addresses when uninitializing */
9038 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9039                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
9040 {
9041         struct hclge_mac_node *mac_node, *tmp;
9042         struct hclge_dev *hdev = vport->back;
9043         struct list_head tmp_del_list, *list;
9044
9045         INIT_LIST_HEAD(&tmp_del_list);
9046
9047         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9048                 &vport->uc_mac_list : &vport->mc_mac_list;
9049
9050         spin_lock_bh(&vport->mac_list_lock);
9051
9052         list_for_each_entry_safe(mac_node, tmp, list, node) {
9053                 switch (mac_node->state) {
9054                 case HCLGE_MAC_TO_DEL:
9055                 case HCLGE_MAC_ACTIVE:
9056                         list_del(&mac_node->node);
9057                         list_add_tail(&mac_node->node, &tmp_del_list);
9058                         break;
9059                 case HCLGE_MAC_TO_ADD:
9060                         list_del(&mac_node->node);
9061                         kfree(mac_node);
9062                         break;
9063                 }
9064         }
9065
9066         spin_unlock_bh(&vport->mac_list_lock);
9067
9068         if (mac_type == HCLGE_MAC_ADDR_UC)
9069                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9070                                             hclge_rm_uc_addr_common);
9071         else
9072                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9073                                             hclge_rm_mc_addr_common);
9074
9075         if (!list_empty(&tmp_del_list))
9076                 dev_warn(&hdev->pdev->dev,
9077                          "uninit %s mac list for vport %u not completely.\n",
9078                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9079                          vport->vport_id);
9080
9081         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9082                 list_del(&mac_node->node);
9083                 kfree(mac_node);
9084         }
9085 }
9086
9087 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9088 {
9089         struct hclge_vport *vport;
9090         int i;
9091
9092         for (i = 0; i < hdev->num_alloc_vport; i++) {
9093                 vport = &hdev->vport[i];
9094                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9095                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9096         }
9097 }
9098
9099 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9100                                               u16 cmdq_resp, u8 resp_code)
9101 {
9102 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
9103 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
9104 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
9105 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
9106
9107         int return_status;
9108
9109         if (cmdq_resp) {
9110                 dev_err(&hdev->pdev->dev,
9111                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9112                         cmdq_resp);
9113                 return -EIO;
9114         }
9115
9116         switch (resp_code) {
9117         case HCLGE_ETHERTYPE_SUCCESS_ADD:
9118         case HCLGE_ETHERTYPE_ALREADY_ADD:
9119                 return_status = 0;
9120                 break;
9121         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9122                 dev_err(&hdev->pdev->dev,
9123                         "add mac ethertype failed for manager table overflow.\n");
9124                 return_status = -EIO;
9125                 break;
9126         case HCLGE_ETHERTYPE_KEY_CONFLICT:
9127                 dev_err(&hdev->pdev->dev,
9128                         "add mac ethertype failed for key conflict.\n");
9129                 return_status = -EIO;
9130                 break;
9131         default:
9132                 dev_err(&hdev->pdev->dev,
9133                         "add mac ethertype failed for undefined, code=%u.\n",
9134                         resp_code);
9135                 return_status = -EIO;
9136         }
9137
9138         return return_status;
9139 }
9140
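/* Return true if @mac_addr is already present in the unicast MAC/VLAN
 * table or is already assigned to another VF.
 */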
9141 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9142                                      u8 *mac_addr)
9143 {
9144         struct hclge_mac_vlan_tbl_entry_cmd req;
9145         struct hclge_dev *hdev = vport->back;
9146         struct hclge_desc desc;
9147         u16 egress_port = 0;
9148         int i;
9149
9150         if (is_zero_ether_addr(mac_addr))
9151                 return false;
9152
9153         memset(&req, 0, sizeof(req));
9154         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9155                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9156         req.egress_port = cpu_to_le16(egress_port);
9157         hclge_prepare_mac_addr(&req, mac_addr, false);
9158
9159         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9160                 return true;
9161
9162         vf_idx += HCLGE_VF_VPORT_START_NUM;
9163         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9164                 if (i != vf_idx &&
9165                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9166                         return true;
9167
9168         return false;
9169 }
9170
9171 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9172                             u8 *mac_addr)
9173 {
9174         struct hclge_vport *vport = hclge_get_vport(handle);
9175         struct hclge_dev *hdev = vport->back;
9176
9177         vport = hclge_get_vf_vport(hdev, vf);
9178         if (!vport)
9179                 return -EINVAL;
9180
9181         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9182                 dev_info(&hdev->pdev->dev,
9183                          "Specified MAC(=%pM) is same as before, no change committed!\n",
9184                          mac_addr);
9185                 return 0;
9186         }
9187
9188         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9189                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9190                         mac_addr);
9191                 return -EEXIST;
9192         }
9193
9194         ether_addr_copy(vport->vf_info.mac, mac_addr);
9195
9196         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9197                 dev_info(&hdev->pdev->dev,
9198                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9199                          vf, mac_addr);
9200                 return hclge_inform_reset_assert_to_vf(vport);
9201         }
9202
9203         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9204                  vf, mac_addr);
9205         return 0;
9206 }
9207
9208 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9209                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
9210 {
9211         struct hclge_desc desc;
9212         u8 resp_code;
9213         u16 retval;
9214         int ret;
9215
9216         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9217         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9218
9219         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9220         if (ret) {
9221                 dev_err(&hdev->pdev->dev,
9222                         "add mac ethertype failed for cmd_send, ret =%d.\n",
9223                         ret);
9224                 return ret;
9225         }
9226
9227         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9228         retval = le16_to_cpu(desc.retval);
9229
9230         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9231 }
9232
9233 static int init_mgr_tbl(struct hclge_dev *hdev)
9234 {
9235         int ret;
9236         int i;
9237
9238         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9239                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9240                 if (ret) {
9241                         dev_err(&hdev->pdev->dev,
9242                                 "add mac ethertype failed, ret =%d.\n",
9243                                 ret);
9244                         return ret;
9245                 }
9246         }
9247
9248         return 0;
9249 }
9250
9251 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9252 {
9253         struct hclge_vport *vport = hclge_get_vport(handle);
9254         struct hclge_dev *hdev = vport->back;
9255
9256         ether_addr_copy(p, hdev->hw.mac.mac_addr);
9257 }
9258
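/* Switch the device address in the unicast mac list: queue the new address
 * for adding and keep it at the head of the list; mark the old address for
 * deletion (or drop it if it was never actually added).
 */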
9259 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9260                                        const u8 *old_addr, const u8 *new_addr)
9261 {
9262         struct list_head *list = &vport->uc_mac_list;
9263         struct hclge_mac_node *old_node, *new_node;
9264
9265         new_node = hclge_find_mac_node(list, new_addr);
9266         if (!new_node) {
9267                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9268                 if (!new_node)
9269                         return -ENOMEM;
9270
9271                 new_node->state = HCLGE_MAC_TO_ADD;
9272                 ether_addr_copy(new_node->mac_addr, new_addr);
9273                 list_add(&new_node->node, list);
9274         } else {
9275                 if (new_node->state == HCLGE_MAC_TO_DEL)
9276                         new_node->state = HCLGE_MAC_ACTIVE;
9277
9278                 /* make sure the new addr is at the list head, otherwise
9279                  * the dev addr may not be re-added into the mac table due
9280                  * to the umv space limitation after a global/imp reset,
9281                  * which clears the mac table in hardware.
9282                  */
9283                 list_move(&new_node->node, list);
9284         }
9285
9286         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9287                 old_node = hclge_find_mac_node(list, old_addr);
9288                 if (old_node) {
9289                         if (old_node->state == HCLGE_MAC_TO_ADD) {
9290                                 list_del(&old_node->node);
9291                                 kfree(old_node);
9292                         } else {
9293                                 old_node->state = HCLGE_MAC_TO_DEL;
9294                         }
9295                 }
9296         }
9297
9298         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9299
9300         return 0;
9301 }
9302
9303 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9304                               bool is_first)
9305 {
9306         const unsigned char *new_addr = (const unsigned char *)p;
9307         struct hclge_vport *vport = hclge_get_vport(handle);
9308         struct hclge_dev *hdev = vport->back;
9309         unsigned char *old_addr = NULL;
9310         int ret;
9311
9312         /* mac addr check */
9313         if (is_zero_ether_addr(new_addr) ||
9314             is_broadcast_ether_addr(new_addr) ||
9315             is_multicast_ether_addr(new_addr)) {
9316                 dev_err(&hdev->pdev->dev,
9317                         "change uc mac err! invalid mac: %pM.\n",
9318                          new_addr);
9319                 return -EINVAL;
9320         }
9321
9322         ret = hclge_pause_addr_cfg(hdev, new_addr);
9323         if (ret) {
9324                 dev_err(&hdev->pdev->dev,
9325                         "failed to configure mac pause address, ret = %d\n",
9326                         ret);
9327                 return ret;
9328         }
9329
9330         if (!is_first)
9331                 old_addr = hdev->hw.mac.mac_addr;
9332
9333         spin_lock_bh(&vport->mac_list_lock);
9334         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9335         if (ret) {
9336                 dev_err(&hdev->pdev->dev,
9337                         "failed to change the mac addr:%pM, ret = %d\n",
9338                         new_addr, ret);
9339                 spin_unlock_bh(&vport->mac_list_lock);
9340
9341                 if (!is_first)
9342                         hclge_pause_addr_cfg(hdev, old_addr);
9343
9344                 return ret;
9345         }
9346         /* we must update the dev addr under the spin lock to prevent it
9347          * from being removed by the set_rx_mode path.
9348          */
9349         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9350         spin_unlock_bh(&vport->mac_list_lock);
9351
9352         hclge_task_schedule(hdev, 0);
9353
9354         return 0;
9355 }
9356
9357 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9358 {
9359         struct mii_ioctl_data *data = if_mii(ifr);
9360
9361         if (!hnae3_dev_phy_imp_supported(hdev))
9362                 return -EOPNOTSUPP;
9363
9364         switch (cmd) {
9365         case SIOCGMIIPHY:
9366                 data->phy_id = hdev->hw.mac.phy_addr;
9367                 /* this command reads phy id and register at the same time */
9368                 fallthrough;
9369         case SIOCGMIIREG:
9370                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9371                 return 0;
9372
9373         case SIOCSMIIREG:
9374                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9375         default:
9376                 return -EOPNOTSUPP;
9377         }
9378 }
9379
9380 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9381                           int cmd)
9382 {
9383         struct hclge_vport *vport = hclge_get_vport(handle);
9384         struct hclge_dev *hdev = vport->back;
9385
9386         if (!hdev->hw.mac.phydev)
9387                 return hclge_mii_ioctl(hdev, ifr, cmd);
9388
9389         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9390 }
9391
9392 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9393                                              bool bypass_en)
9394 {
9395         struct hclge_port_vlan_filter_bypass_cmd *req;
9396         struct hclge_desc desc;
9397         int ret;
9398
9399         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9400         req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9401         req->vf_id = vf_id;
9402         hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9403                       bypass_en ? 1 : 0);
9404
9405         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9406         if (ret)
9407                 dev_err(&hdev->pdev->dev,
9408                         "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9409                         vf_id, ret);
9410
9411         return ret;
9412 }
9413
9414 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9415                                       u8 fe_type, bool filter_en, u8 vf_id)
9416 {
9417         struct hclge_vlan_filter_ctrl_cmd *req;
9418         struct hclge_desc desc;
9419         int ret;
9420
9421         /* read current vlan filter parameter */
9422         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9423         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9424         req->vlan_type = vlan_type;
9425         req->vf_id = vf_id;
9426
9427         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9428         if (ret) {
9429                 dev_err(&hdev->pdev->dev,
9430                         "failed to get vlan filter config, ret = %d.\n", ret);
9431                 return ret;
9432         }
9433
9434         /* modify and write new config parameter */
9435         hclge_cmd_reuse_desc(&desc, false);
9436         req->vlan_fe = filter_en ?
9437                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9438
9439         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9440         if (ret)
9441                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9442                         ret);
9443
9444         return ret;
9445 }
9446
9447 #define HCLGE_FILTER_TYPE_VF            0
9448 #define HCLGE_FILTER_TYPE_PORT          1
9449 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
9450 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
9451 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
9452 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
9453 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
9454 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
9455                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
9456 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
9457                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
9458
9459 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9460 {
9461         struct hclge_dev *hdev = vport->back;
9462         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9463         int ret;
9464
9465         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9466                 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9467                                                   HCLGE_FILTER_FE_EGRESS_V1_B,
9468                                                   enable, vport->vport_id);
9469
9470         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9471                                          HCLGE_FILTER_FE_EGRESS, enable,
9472                                          vport->vport_id);
9473         if (ret)
9474                 return ret;
9475
9476         if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
9477                 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9478                                                         !enable);
9479         else if (!vport->vport_id)
9480                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9481                                                  HCLGE_FILTER_FE_INGRESS,
9482                                                  enable, 0);
9483
9484         return ret;
9485 }
9486
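/* Decide whether the vlan filter should actually be enabled for this vport,
 * taking the port base vlan state, promiscuous/trusted configuration and
 * the currently configured vlans into account.
 */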
9487 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9488 {
9489         struct hnae3_handle *handle = &vport->nic;
9490         struct hclge_vport_vlan_cfg *vlan, *tmp;
9491         struct hclge_dev *hdev = vport->back;
9492
9493         if (vport->vport_id) {
9494                 if (vport->port_base_vlan_cfg.state !=
9495                         HNAE3_PORT_BASE_VLAN_DISABLE)
9496                         return true;
9497
9498                 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9499                         return false;
9500         } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9501                 return false;
9502         }
9503
9504         if (!vport->req_vlan_fltr_en)
9505                 return false;
9506
9507         /* for compatibility with older devices, always enable the vlan filter */
9508         if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9509                 return true;
9510
9511         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9512                 if (vlan->vlan_id != 0)
9513                         return true;
9514
9515         return false;
9516 }
9517
9518 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9519 {
9520         struct hclge_dev *hdev = vport->back;
9521         bool need_en;
9522         int ret;
9523
9524         mutex_lock(&hdev->vport_lock);
9525
9526         vport->req_vlan_fltr_en = request_en;
9527
9528         need_en = hclge_need_enable_vport_vlan_filter(vport);
9529         if (need_en == vport->cur_vlan_fltr_en) {
9530                 mutex_unlock(&hdev->vport_lock);
9531                 return 0;
9532         }
9533
9534         ret = hclge_set_vport_vlan_filter(vport, need_en);
9535         if (ret) {
9536                 mutex_unlock(&hdev->vport_lock);
9537                 return ret;
9538         }
9539
9540         vport->cur_vlan_fltr_en = need_en;
9541
9542         mutex_unlock(&hdev->vport_lock);
9543
9544         return 0;
9545 }
9546
9547 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9548 {
9549         struct hclge_vport *vport = hclge_get_vport(handle);
9550
9551         return hclge_enable_vport_vlan_filter(vport, enable);
9552 }
9553
9554 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9555                                         bool is_kill, u16 vlan,
9556                                         struct hclge_desc *desc)
9557 {
9558         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9559         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9560         u8 vf_byte_val;
9561         u8 vf_byte_off;
9562         int ret;
9563
9564         hclge_cmd_setup_basic_desc(&desc[0],
9565                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9566         hclge_cmd_setup_basic_desc(&desc[1],
9567                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9568
9569         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9570
9571         vf_byte_off = vfid / 8;
9572         vf_byte_val = 1 << (vfid % 8);
9573
9574         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9575         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9576
9577         req0->vlan_id  = cpu_to_le16(vlan);
9578         req0->vlan_cfg = is_kill;
9579
9580         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9581                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9582         else
9583                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9584
9585         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9586         if (ret) {
9587                 dev_err(&hdev->pdev->dev,
9588                         "Send vf vlan command fail, ret =%d.\n",
9589                         ret);
9590                 return ret;
9591         }
9592
9593         return 0;
9594 }
9595
9596 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9597                                           bool is_kill, struct hclge_desc *desc)
9598 {
9599         struct hclge_vlan_filter_vf_cfg_cmd *req;
9600
9601         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9602
9603         if (!is_kill) {
9604 #define HCLGE_VF_VLAN_NO_ENTRY  2
9605                 if (!req->resp_code || req->resp_code == 1)
9606                         return 0;
9607
9608                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9609                         set_bit(vfid, hdev->vf_vlan_full);
9610                         dev_warn(&hdev->pdev->dev,
9611                                  "vf vlan table is full, vf vlan filter is disabled\n");
9612                         return 0;
9613                 }
9614
9615                 dev_err(&hdev->pdev->dev,
9616                         "Add vf vlan filter fail, ret =%u.\n",
9617                         req->resp_code);
9618         } else {
9619 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9620                 if (!req->resp_code)
9621                         return 0;
9622
9623                 /* when the vf vlan table is full, the vf vlan filter is
9624                  * disabled and new vlan ids are not added to the table.
9625                  * Just return 0 without a warning, to avoid flooding the
9626                  * log when the driver is unloaded.
9627                  */
9628                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9629                         return 0;
9630
9631                 dev_err(&hdev->pdev->dev,
9632                         "Kill vf vlan filter fail, ret =%u.\n",
9633                         req->resp_code);
9634         }
9635
9636         return -EIO;
9637 }
9638
9639 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9640                                     bool is_kill, u16 vlan)
9641 {
9642         struct hclge_vport *vport = &hdev->vport[vfid];
9643         struct hclge_desc desc[2];
9644         int ret;
9645
9646         /* if the vf vlan table is full, firmware disables the vf vlan filter,
9647          * so it is neither possible nor necessary to add a new vlan id to it.
9648          * If spoof check is enabled and the vf vlan table is full, the new
9649          * vlan must not be added, since tx packets with it would be dropped.
9650          */
9651         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9652                 if (vport->vf_info.spoofchk && vlan) {
9653                         dev_err(&hdev->pdev->dev,
9654                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9655                         return -EPERM;
9656                 }
9657                 return 0;
9658         }
9659
9660         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9661         if (ret)
9662                 return ret;
9663
9664         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9665 }
9666
9667 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9668                                       u16 vlan_id, bool is_kill)
9669 {
9670         struct hclge_vlan_filter_pf_cfg_cmd *req;
9671         struct hclge_desc desc;
9672         u8 vlan_offset_byte_val;
9673         u8 vlan_offset_byte;
9674         u8 vlan_offset_160;
9675         int ret;
9676
9677         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9678
9679         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9680         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9681                            HCLGE_VLAN_BYTE_SIZE;
9682         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9683
9684         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9685         req->vlan_offset = vlan_offset_160;
9686         req->vlan_cfg = is_kill;
9687         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9688
9689         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9690         if (ret)
9691                 dev_err(&hdev->pdev->dev,
9692                         "port vlan command, send fail, ret =%d.\n", ret);
9693         return ret;
9694 }
9695
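/* Program a vlan filter entry for a vport. The per-VF table is always
 * updated; the port vlan table is only touched when the first vport joins
 * or the last vport leaves the vlan.
 */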
9696 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9697                                     u16 vport_id, u16 vlan_id,
9698                                     bool is_kill)
9699 {
9700         u16 vport_idx, vport_num = 0;
9701         int ret;
9702
9703         if (is_kill && !vlan_id)
9704                 return 0;
9705
9706         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9707         if (ret) {
9708                 dev_err(&hdev->pdev->dev,
9709                         "Set %u vport vlan filter config fail, ret =%d.\n",
9710                         vport_id, ret);
9711                 return ret;
9712         }
9713
9714         /* vlan 0 may be added twice when 8021q module is enabled */
9715         if (!is_kill && !vlan_id &&
9716             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9717                 return 0;
9718
9719         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9720                 dev_err(&hdev->pdev->dev,
9721                         "Add port vlan failed, vport %u is already in vlan %u\n",
9722                         vport_id, vlan_id);
9723                 return -EINVAL;
9724         }
9725
9726         if (is_kill &&
9727             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9728                 dev_err(&hdev->pdev->dev,
9729                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9730                         vport_id, vlan_id);
9731                 return -EINVAL;
9732         }
9733
9734         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9735                 vport_num++;
9736
9737         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9738                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9739                                                  is_kill);
9740
9741         return ret;
9742 }
9743
9744 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9745 {
9746         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9747         struct hclge_vport_vtag_tx_cfg_cmd *req;
9748         struct hclge_dev *hdev = vport->back;
9749         struct hclge_desc desc;
9750         u16 bmap_index;
9751         int status;
9752
9753         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9754
9755         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9756         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9757         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9758         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9759                       vcfg->accept_tag1 ? 1 : 0);
9760         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9761                       vcfg->accept_untag1 ? 1 : 0);
9762         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9763                       vcfg->accept_tag2 ? 1 : 0);
9764         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9765                       vcfg->accept_untag2 ? 1 : 0);
9766         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9767                       vcfg->insert_tag1_en ? 1 : 0);
9768         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9769                       vcfg->insert_tag2_en ? 1 : 0);
9770         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9771                       vcfg->tag_shift_mode_en ? 1 : 0);
9772         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9773
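        /* Select the target vport inside the command: vf_offset picks the
         * command group and one bit in vf_bitmap picks the vport within it.
         * For example, assuming HCLGE_VF_NUM_PER_CMD is 256 and
         * HCLGE_VF_NUM_PER_BYTE is 8, vport_id 10 gives vf_offset 0,
         * bmap_index 1 and bit value 0x04.
         */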
9774         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9775         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9776                         HCLGE_VF_NUM_PER_BYTE;
9777         req->vf_bitmap[bmap_index] =
9778                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9779
9780         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9781         if (status)
9782                 dev_err(&hdev->pdev->dev,
9783                         "Send port txvlan cfg command fail, ret =%d\n",
9784                         status);
9785
9786         return status;
9787 }
9788
9789 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9790 {
9791         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9792         struct hclge_vport_vtag_rx_cfg_cmd *req;
9793         struct hclge_dev *hdev = vport->back;
9794         struct hclge_desc desc;
9795         u16 bmap_index;
9796         int status;
9797
9798         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9799
9800         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9801         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9802                       vcfg->strip_tag1_en ? 1 : 0);
9803         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9804                       vcfg->strip_tag2_en ? 1 : 0);
9805         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9806                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9807         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9808                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9809         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9810                       vcfg->strip_tag1_discard_en ? 1 : 0);
9811         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9812                       vcfg->strip_tag2_discard_en ? 1 : 0);
9813
9814         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9815         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9816                         HCLGE_VF_NUM_PER_BYTE;
9817         req->vf_bitmap[bmap_index] =
9818                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9819
9820         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9821         if (status)
9822                 dev_err(&hdev->pdev->dev,
9823                         "Send port rxvlan cfg command fail, ret =%d\n",
9824                         status);
9825
9826         return status;
9827 }
9828
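/* Configure tx and rx vlan tag offload for @vport according to the port base
 * vlan state: when it is disabled, no default tag1 is inserted and the stack
 * manages vlan tags itself; when it is enabled, a default tag1 built from
 * @vlan_tag and @qos (in the PCP bits) is inserted on tx and the rx strip
 * settings are adjusted so the port vlan stays hidden from the stack.
 */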
9829 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9830                                   u16 port_base_vlan_state,
9831                                   u16 vlan_tag, u8 qos)
9832 {
9833         int ret;
9834
9835         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9836                 vport->txvlan_cfg.accept_tag1 = true;
9837                 vport->txvlan_cfg.insert_tag1_en = false;
9838                 vport->txvlan_cfg.default_tag1 = 0;
9839         } else {
9840                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9841
9842                 vport->txvlan_cfg.accept_tag1 =
9843                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9844                 vport->txvlan_cfg.insert_tag1_en = true;
9845                 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9846                                                  vlan_tag;
9847         }
9848
9849         vport->txvlan_cfg.accept_untag1 = true;
9850
9851         /* accept_tag2 and accept_untag2 are not supported on
9852          * pdev revision(0x20); newer revisions support them, but
9853          * these two fields cannot be configured by the user.
9854          */
9855         vport->txvlan_cfg.accept_tag2 = true;
9856         vport->txvlan_cfg.accept_untag2 = true;
9857         vport->txvlan_cfg.insert_tag2_en = false;
9858         vport->txvlan_cfg.default_tag2 = 0;
9859         vport->txvlan_cfg.tag_shift_mode_en = true;
9860
9861         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9862                 vport->rxvlan_cfg.strip_tag1_en = false;
9863                 vport->rxvlan_cfg.strip_tag2_en =
9864                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9865                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9866         } else {
9867                 vport->rxvlan_cfg.strip_tag1_en =
9868                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9869                 vport->rxvlan_cfg.strip_tag2_en = true;
9870                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9871         }
9872
9873         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9874         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9875         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9876
9877         ret = hclge_set_vlan_tx_offload_cfg(vport);
9878         if (ret)
9879                 return ret;
9880
9881         return hclge_set_vlan_rx_offload_cfg(vport);
9882 }
9883
9884 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9885 {
9886         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9887         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9888         struct hclge_desc desc;
9889         int status;
9890
9891         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9892         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9893         rx_req->ot_fst_vlan_type =
9894                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9895         rx_req->ot_sec_vlan_type =
9896                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9897         rx_req->in_fst_vlan_type =
9898                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9899         rx_req->in_sec_vlan_type =
9900                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9901
9902         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9903         if (status) {
9904                 dev_err(&hdev->pdev->dev,
9905                         "Send rxvlan protocol type command fail, ret =%d\n",
9906                         status);
9907                 return status;
9908         }
9909
9910         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9911
9912         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9913         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9914         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9915
9916         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9917         if (status)
9918                 dev_err(&hdev->pdev->dev,
9919                         "Send txvlan protocol type command fail, ret =%d\n",
9920                         status);
9921
9922         return status;
9923 }
9924
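/* Initialize vlan filtering: enable the per-function vf vlan filter (plus the
 * port ingress filter on revision 0x21 and later), set the default 802.1Q
 * protocol types, apply each vport's vlan tag offload configuration and
 * finally add vlan 0 for the PF.
 */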
9925 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9926 {
9927 #define HCLGE_DEF_VLAN_TYPE             0x8100
9928
9929         struct hnae3_handle *handle = &hdev->vport[0].nic;
9930         struct hclge_vport *vport;
9931         int ret;
9932         int i;
9933
9934         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9935                 /* for revision 0x21, vf vlan filter is per function */
9936                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9937                         vport = &hdev->vport[i];
9938                         ret = hclge_set_vlan_filter_ctrl(hdev,
9939                                                          HCLGE_FILTER_TYPE_VF,
9940                                                          HCLGE_FILTER_FE_EGRESS,
9941                                                          true,
9942                                                          vport->vport_id);
9943                         if (ret)
9944                                 return ret;
9945                         vport->cur_vlan_fltr_en = true;
9946                 }
9947
9948                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9949                                                  HCLGE_FILTER_FE_INGRESS, true,
9950                                                  0);
9951                 if (ret)
9952                         return ret;
9953         } else {
9954                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9955                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9956                                                  true, 0);
9957                 if (ret)
9958                         return ret;
9959         }
9960
9961         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9962         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9963         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9964         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9965         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9966         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9967
9968         ret = hclge_set_vlan_protocol_type(hdev);
9969         if (ret)
9970                 return ret;
9971
9972         for (i = 0; i < hdev->num_alloc_vport; i++) {
9973                 u16 vlan_tag;
9974                 u8 qos;
9975
9976                 vport = &hdev->vport[i];
9977                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9978                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
9979
9980                 ret = hclge_vlan_offload_cfg(vport,
9981                                              vport->port_base_vlan_cfg.state,
9982                                              vlan_tag, qos);
9983                 if (ret)
9984                         return ret;
9985         }
9986
9987         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9988 }
9989
9990 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9991                                        bool writen_to_tbl)
9992 {
9993         struct hclge_vport_vlan_cfg *vlan;
9994
9995         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9996         if (!vlan)
9997                 return;
9998
9999         vlan->hd_tbl_status = writen_to_tbl;
10000         vlan->vlan_id = vlan_id;
10001
10002         list_add_tail(&vlan->node, &vport->vlan_list);
10003 }
10004
10005 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10006 {
10007         struct hclge_vport_vlan_cfg *vlan, *tmp;
10008         struct hclge_dev *hdev = vport->back;
10009         int ret;
10010
10011         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10012                 if (!vlan->hd_tbl_status) {
10013                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10014                                                        vport->vport_id,
10015                                                        vlan->vlan_id, false);
10016                         if (ret) {
10017                                 dev_err(&hdev->pdev->dev,
10018                                         "restore vport vlan list failed, ret=%d\n",
10019                                         ret);
10020                                 return ret;
10021                         }
10022                 }
10023                 vlan->hd_tbl_status = true;
10024         }
10025
10026         return 0;
10027 }
10028
10029 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10030                                       bool is_write_tbl)
10031 {
10032         struct hclge_vport_vlan_cfg *vlan, *tmp;
10033         struct hclge_dev *hdev = vport->back;
10034
10035         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10036                 if (vlan->vlan_id == vlan_id) {
10037                         if (is_write_tbl && vlan->hd_tbl_status)
10038                                 hclge_set_vlan_filter_hw(hdev,
10039                                                          htons(ETH_P_8021Q),
10040                                                          vport->vport_id,
10041                                                          vlan_id,
10042                                                          true);
10043
10044                         list_del(&vlan->node);
10045                         kfree(vlan);
10046                         break;
10047                 }
10048         }
10049 }
10050
10051 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10052 {
10053         struct hclge_vport_vlan_cfg *vlan, *tmp;
10054         struct hclge_dev *hdev = vport->back;
10055
10056         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10057                 if (vlan->hd_tbl_status)
10058                         hclge_set_vlan_filter_hw(hdev,
10059                                                  htons(ETH_P_8021Q),
10060                                                  vport->vport_id,
10061                                                  vlan->vlan_id,
10062                                                  true);
10063
10064                 vlan->hd_tbl_status = false;
10065                 if (is_del_list) {
10066                         list_del(&vlan->node);
10067                         kfree(vlan);
10068                 }
10069         }
10070         clear_bit(vport->vport_id, hdev->vf_vlan_full);
10071 }
10072
10073 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10074 {
10075         struct hclge_vport_vlan_cfg *vlan, *tmp;
10076         struct hclge_vport *vport;
10077         int i;
10078
10079         for (i = 0; i < hdev->num_alloc_vport; i++) {
10080                 vport = &hdev->vport[i];
10081                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10082                         list_del(&vlan->node);
10083                         kfree(vlan);
10084                 }
10085         }
10086 }
10087
10088 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10089 {
10090         struct hclge_vport_vlan_cfg *vlan, *tmp;
10091         struct hclge_dev *hdev = vport->back;
10092         u16 vlan_proto;
10093         u16 vlan_id;
10094         u16 state;
10095         int ret;
10096
10097         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10098         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10099         state = vport->port_base_vlan_cfg.state;
10100
10101         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10102                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10103                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10104                                          vport->vport_id, vlan_id,
10105                                          false);
10106                 return;
10107         }
10108
10109         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10110                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10111                                                vport->vport_id,
10112                                                vlan->vlan_id, false);
10113                 if (ret)
10114                         break;
10115                 vlan->hd_tbl_status = true;
10116         }
10117 }
10118
10119 /* For global reset and imp reset, hardware will clear the mac table,
10120  * so we change the mac address state from ACTIVE to TO_ADD, then they
10121  * can be restored in the service task after the reset completes. Furthermore,
10122  * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
10123  * restored after reset, so just remove these mac nodes from mac_list.
10124  */
10125 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10126 {
10127         struct hclge_mac_node *mac_node, *tmp;
10128
10129         list_for_each_entry_safe(mac_node, tmp, list, node) {
10130                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10131                         mac_node->state = HCLGE_MAC_TO_ADD;
10132                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10133                         list_del(&mac_node->node);
10134                         kfree(mac_node);
10135                 }
10136         }
10137 }
10138
10139 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10140 {
10141         spin_lock_bh(&vport->mac_list_lock);
10142
10143         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10144         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10145         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10146
10147         spin_unlock_bh(&vport->mac_list_lock);
10148 }
10149
10150 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10151 {
10152         struct hclge_vport *vport = &hdev->vport[0];
10153         struct hnae3_handle *handle = &vport->nic;
10154
10155         hclge_restore_mac_table_common(vport);
10156         hclge_restore_vport_vlan_table(vport);
10157         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10158         hclge_restore_fd_entries(handle);
10159 }
10160
10161 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10162 {
10163         struct hclge_vport *vport = hclge_get_vport(handle);
10164
10165         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10166                 vport->rxvlan_cfg.strip_tag1_en = false;
10167                 vport->rxvlan_cfg.strip_tag2_en = enable;
10168                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10169         } else {
10170                 vport->rxvlan_cfg.strip_tag1_en = enable;
10171                 vport->rxvlan_cfg.strip_tag2_en = true;
10172                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10173         }
10174
10175         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10176         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10177         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10178         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10179
10180         return hclge_set_vlan_rx_offload_cfg(vport);
10181 }
10182
10183 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10184 {
10185         struct hclge_dev *hdev = vport->back;
10186
10187         if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10188                 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10189 }
10190
10191 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10192                                             u16 port_base_vlan_state,
10193                                             struct hclge_vlan_info *new_info,
10194                                             struct hclge_vlan_info *old_info)
10195 {
10196         struct hclge_dev *hdev = vport->back;
10197         int ret;
10198
10199         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10200                 hclge_rm_vport_all_vlan_table(vport, false);
10201                 /* force clear VLAN 0 */
10202                 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10203                 if (ret)
10204                         return ret;
10205                 return hclge_set_vlan_filter_hw(hdev,
10206                                                  htons(new_info->vlan_proto),
10207                                                  vport->vport_id,
10208                                                  new_info->vlan_tag,
10209                                                  false);
10210         }
10211
10212         /* force add VLAN 0 */
10213         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10214         if (ret)
10215                 return ret;
10216
10217         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10218                                        vport->vport_id, old_info->vlan_tag,
10219                                        true);
10220         if (ret)
10221                 return ret;
10222
10223         return hclge_add_vport_all_vlan_table(vport);
10224 }
10225
10226 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10227                                           const struct hclge_vlan_info *old_cfg)
10228 {
10229         if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10230                 return true;
10231
10232         if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10233                 return true;
10234
10235         return false;
10236 }
10237
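/* Apply a new port base vlan @state and @vlan_info to @vport: reconfigure the
 * vlan tag offload, update the hardware filter entries (add the new tag and
 * remove the old one, or swap between vlan 0 and the port vlan when the state
 * toggles), then record the new state in the vport and the nic handle.
 */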
10238 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10239                                     struct hclge_vlan_info *vlan_info)
10240 {
10241         struct hnae3_handle *nic = &vport->nic;
10242         struct hclge_vlan_info *old_vlan_info;
10243         struct hclge_dev *hdev = vport->back;
10244         int ret;
10245
10246         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10247
10248         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10249                                      vlan_info->qos);
10250         if (ret)
10251                 return ret;
10252
10253         if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10254                 goto out;
10255
10256         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10257                 /* add new VLAN tag */
10258                 ret = hclge_set_vlan_filter_hw(hdev,
10259                                                htons(vlan_info->vlan_proto),
10260                                                vport->vport_id,
10261                                                vlan_info->vlan_tag,
10262                                                false);
10263                 if (ret)
10264                         return ret;
10265
10266                 /* remove old VLAN tag */
10267                 if (old_vlan_info->vlan_tag == 0)
10268                         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10269                                                        true, 0);
10270                 else
10271                         ret = hclge_set_vlan_filter_hw(hdev,
10272                                                        htons(ETH_P_8021Q),
10273                                                        vport->vport_id,
10274                                                        old_vlan_info->vlan_tag,
10275                                                        true);
10276                 if (ret) {
10277                         dev_err(&hdev->pdev->dev,
10278                                 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10279                                 vport->vport_id, old_vlan_info->vlan_tag, ret);
10280                         return ret;
10281                 }
10282
10283                 goto out;
10284         }
10285
10286         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10287                                                old_vlan_info);
10288         if (ret)
10289                 return ret;
10290
10291 out:
10292         vport->port_base_vlan_cfg.state = state;
10293         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10294                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10295         else
10296                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10297
10298         vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10299         hclge_set_vport_vlan_fltr_change(vport);
10300
10301         return 0;
10302 }
10303
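/* Translate the requested @vlan/@qos pair into a port base vlan transition:
 * returns NOCHANGE when nothing changes, ENABLE when a port vlan is being set
 * for the first time, DISABLE when it is being cleared, and MODIFY when an
 * existing port vlan is replaced by a different vlan/qos pair.
 */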
10304 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10305                                           enum hnae3_port_base_vlan_state state,
10306                                           u16 vlan, u8 qos)
10307 {
10308         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10309                 if (!vlan && !qos)
10310                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10311
10312                 return HNAE3_PORT_BASE_VLAN_ENABLE;
10313         }
10314
10315         if (!vlan && !qos)
10316                 return HNAE3_PORT_BASE_VLAN_DISABLE;
10317
10318         if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10319             vport->port_base_vlan_cfg.vlan_info.qos == qos)
10320                 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10321
10322         return HNAE3_PORT_BASE_VLAN_MODIFY;
10323 }
10324
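/* Handler for setting a VF port based vlan from the PF. It validates the
 * vlan, qos and protocol, works out the port base vlan transition, updates
 * the vport configuration, and on devices before DEVICE_VERSION_V3 pushes the
 * new vlan info to the VF over the mailbox if the VF is alive.
 */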
10325 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10326                                     u16 vlan, u8 qos, __be16 proto)
10327 {
10328         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10329         struct hclge_vport *vport = hclge_get_vport(handle);
10330         struct hclge_dev *hdev = vport->back;
10331         struct hclge_vlan_info vlan_info;
10332         u16 state;
10333         int ret;
10334
10335         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10336                 return -EOPNOTSUPP;
10337
10338         vport = hclge_get_vf_vport(hdev, vfid);
10339         if (!vport)
10340                 return -EINVAL;
10341
10342         /* qos is a 3-bit value, so it cannot be bigger than 7 */
10343         if (vlan > VLAN_N_VID - 1 || qos > 7)
10344                 return -EINVAL;
10345         if (proto != htons(ETH_P_8021Q))
10346                 return -EPROTONOSUPPORT;
10347
10348         state = hclge_get_port_base_vlan_state(vport,
10349                                                vport->port_base_vlan_cfg.state,
10350                                                vlan, qos);
10351         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10352                 return 0;
10353
10354         vlan_info.vlan_tag = vlan;
10355         vlan_info.qos = qos;
10356         vlan_info.vlan_proto = ntohs(proto);
10357
10358         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10359         if (ret) {
10360                 dev_err(&hdev->pdev->dev,
10361                         "failed to update port base vlan for vf %d, ret = %d\n",
10362                         vfid, ret);
10363                 return ret;
10364         }
10365
10366         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10367          * VLAN state.
10368          */
10369         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10370             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10371                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10372                                                   vport->vport_id, state,
10373                                                   &vlan_info);
10374
10375         return 0;
10376 }
10377
10378 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10379 {
10380         struct hclge_vlan_info *vlan_info;
10381         struct hclge_vport *vport;
10382         int ret;
10383         int vf;
10384
10385         /* clear port base vlan for all vf */
10386         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10387                 vport = &hdev->vport[vf];
10388                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10389
10390                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10391                                                vport->vport_id,
10392                                                vlan_info->vlan_tag, true);
10393                 if (ret)
10394                         dev_err(&hdev->pdev->dev,
10395                                 "failed to clear vf vlan for vf%d, ret = %d\n",
10396                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10397         }
10398 }
10399
10400 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10401                           u16 vlan_id, bool is_kill)
10402 {
10403         struct hclge_vport *vport = hclge_get_vport(handle);
10404         struct hclge_dev *hdev = vport->back;
10405         bool writen_to_tbl = false;
10406         int ret = 0;
10407
10408         /* When the device is resetting or the reset has failed, firmware is
10409          * unable to handle the mailbox. Just record the vlan id, and remove
10410          * it after the reset finishes.
10411          */
10412         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10413              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10414                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10415                 return -EBUSY;
10416         }
10417
10418         /* When port base vlan is enabled, we use the port base vlan as the
10419          * vlan filter entry. In this case, we don't update the vlan filter
10420          * table when the user adds a new vlan or removes an existing one;
10421          * we just update the vport vlan list. The vlan ids in the vlan list
10422          * are written to the vlan filter table once port base vlan is disabled.
10423          */
10424         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10425                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10426                                                vlan_id, is_kill);
10427                 writen_to_tbl = true;
10428         }
10429
10430         if (!ret) {
10431                 if (is_kill)
10432                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10433                 else
10434                         hclge_add_vport_vlan_table(vport, vlan_id,
10435                                                    writen_to_tbl);
10436         } else if (is_kill) {
10437                 /* when removing the hw vlan filter fails, record the vlan id
10438                  * and try to remove it from hw later, to stay consistent
10439                  * with the stack
10440                  */
10441                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10442         }
10443
10444         hclge_set_vport_vlan_fltr_change(vport);
10445
10446         return ret;
10447 }
10448
10449 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10450 {
10451         struct hclge_vport *vport;
10452         int ret;
10453         u16 i;
10454
10455         for (i = 0; i < hdev->num_alloc_vport; i++) {
10456                 vport = &hdev->vport[i];
10457                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10458                                         &vport->state))
10459                         continue;
10460
10461                 ret = hclge_enable_vport_vlan_filter(vport,
10462                                                      vport->req_vlan_fltr_en);
10463                 if (ret) {
10464                         dev_err(&hdev->pdev->dev,
10465                                 "failed to sync vlan filter state for vport%u, ret = %d\n",
10466                                 vport->vport_id, ret);
10467                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10468                                 &vport->state);
10469                         return;
10470                 }
10471         }
10472 }
10473
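/* Periodic task: retry deleting vlan ids whose removal failed earlier (they
 * are recorded in each vport's vlan_del_fail_bmap), processing at most
 * HCLGE_MAX_SYNC_COUNT entries per invocation, then sync the per-vport vlan
 * filter enable state.
 */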
10474 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10475 {
10476 #define HCLGE_MAX_SYNC_COUNT    60
10477
10478         int i, ret, sync_cnt = 0;
10479         u16 vlan_id;
10480
10481         /* sync failed vlan deletions for every vport, including the PF */
10482         for (i = 0; i < hdev->num_alloc_vport; i++) {
10483                 struct hclge_vport *vport = &hdev->vport[i];
10484
10485                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10486                                          VLAN_N_VID);
10487                 while (vlan_id != VLAN_N_VID) {
10488                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10489                                                        vport->vport_id, vlan_id,
10490                                                        true);
10491                         if (ret && ret != -EINVAL)
10492                                 return;
10493
10494                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10495                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10496                         hclge_set_vport_vlan_fltr_change(vport);
10497
10498                         sync_cnt++;
10499                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10500                                 return;
10501
10502                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10503                                                  VLAN_N_VID);
10504                 }
10505         }
10506
10507         hclge_sync_vlan_fltr_state(hdev);
10508 }
10509
10510 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10511 {
10512         struct hclge_config_max_frm_size_cmd *req;
10513         struct hclge_desc desc;
10514
10515         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10516
10517         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10518         req->max_frm_size = cpu_to_le16(new_mps);
10519         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10520
10521         return hclge_cmd_send(&hdev->hw, &desc, 1);
10522 }
10523
10524 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10525 {
10526         struct hclge_vport *vport = hclge_get_vport(handle);
10527
10528         return hclge_set_vport_mtu(vport, new_mtu);
10529 }
10530
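/* Set the MTU for @vport. The hardware is programmed with the maximum frame
 * size, i.e. MTU plus the Ethernet header, FCS and two vlan tags; for example
 * an MTU of 1500 gives 1500 + 14 + 4 + 2 * 4 = 1526 bytes. A VF only records
 * its mps, which must fit within the PF's; changing the PF MTU requires every
 * VF mps to still fit, takes the client down, reprograms the MAC frame size
 * and reallocates the packet buffers.
 */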
10531 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10532 {
10533         struct hclge_dev *hdev = vport->back;
10534         int i, max_frm_size, ret;
10535
10536         /* HW supports 2 layers of vlan */
10537         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10538         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10539             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10540                 return -EINVAL;
10541
10542         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10543         mutex_lock(&hdev->vport_lock);
10544         /* VF's mps must fit within hdev->mps */
10545         if (vport->vport_id && max_frm_size > hdev->mps) {
10546                 mutex_unlock(&hdev->vport_lock);
10547                 return -EINVAL;
10548         } else if (vport->vport_id) {
10549                 vport->mps = max_frm_size;
10550                 mutex_unlock(&hdev->vport_lock);
10551                 return 0;
10552         }
10553
10554         /* PF's mps must not be less than any VF's mps */
10555         for (i = 1; i < hdev->num_alloc_vport; i++)
10556                 if (max_frm_size < hdev->vport[i].mps) {
10557                         mutex_unlock(&hdev->vport_lock);
10558                         return -EINVAL;
10559                 }
10560
10561         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10562
10563         ret = hclge_set_mac_mtu(hdev, max_frm_size);
10564         if (ret) {
10565                 dev_err(&hdev->pdev->dev,
10566                         "Change mtu fail, ret =%d\n", ret);
10567                 goto out;
10568         }
10569
10570         hdev->mps = max_frm_size;
10571         vport->mps = max_frm_size;
10572
10573         ret = hclge_buffer_alloc(hdev);
10574         if (ret)
10575                 dev_err(&hdev->pdev->dev,
10576                         "Allocate buffer fail, ret =%d\n", ret);
10577
10578 out:
10579         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10580         mutex_unlock(&hdev->vport_lock);
10581         return ret;
10582 }
10583
10584 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10585                                     bool enable)
10586 {
10587         struct hclge_reset_tqp_queue_cmd *req;
10588         struct hclge_desc desc;
10589         int ret;
10590
10591         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10592
10593         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10594         req->tqp_id = cpu_to_le16(queue_id);
10595         if (enable)
10596                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10597
10598         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10599         if (ret) {
10600                 dev_err(&hdev->pdev->dev,
10601                         "Send tqp reset cmd error, status =%d\n", ret);
10602                 return ret;
10603         }
10604
10605         return 0;
10606 }
10607
10608 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10609 {
10610         struct hclge_reset_tqp_queue_cmd *req;
10611         struct hclge_desc desc;
10612         int ret;
10613
10614         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10615
10616         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10617         req->tqp_id = cpu_to_le16(queue_id);
10618
10619         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10620         if (ret) {
10621                 dev_err(&hdev->pdev->dev,
10622                         "Get reset status error, status =%d\n", ret);
10623                 return ret;
10624         }
10625
10626         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10627 }
10628
10629 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10630 {
10631         struct hnae3_queue *queue;
10632         struct hclge_tqp *tqp;
10633
10634         queue = handle->kinfo.tqp[queue_id];
10635         tqp = container_of(queue, struct hclge_tqp, q);
10636
10637         return tqp->index;
10638 }
10639
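/* Fallback per-queue reset: for every tqp of the handle, assert the queue
 * reset with HCLGE_OPC_RESET_TQP_QUEUE, poll the ready bit (sleeping about
 * 1ms between polls) up to HCLGE_TQP_RESET_TRY_TIMES, then deassert the
 * reset. Returns -ETIME if a queue never becomes ready.
 */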
10640 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10641 {
10642         struct hclge_vport *vport = hclge_get_vport(handle);
10643         struct hclge_dev *hdev = vport->back;
10644         u16 reset_try_times = 0;
10645         int reset_status;
10646         u16 queue_gid;
10647         int ret;
10648         u16 i;
10649
10650         for (i = 0; i < handle->kinfo.num_tqps; i++) {
10651                 queue_gid = hclge_covert_handle_qid_global(handle, i);
10652                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10653                 if (ret) {
10654                         dev_err(&hdev->pdev->dev,
10655                                 "failed to send reset tqp cmd, ret = %d\n",
10656                                 ret);
10657                         return ret;
10658                 }
10659
10660                 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10661                         reset_status = hclge_get_reset_status(hdev, queue_gid);
10662                         if (reset_status)
10663                                 break;
10664
10665                         /* Wait for tqp hw reset */
10666                         usleep_range(1000, 1200);
10667                 }
10668
10669                 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10670                         dev_err(&hdev->pdev->dev,
10671                                 "wait for tqp hw reset timeout\n");
10672                         return -ETIME;
10673                 }
10674
10675                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10676                 if (ret) {
10677                         dev_err(&hdev->pdev->dev,
10678                                 "failed to deassert soft reset, ret = %d\n",
10679                                 ret);
10680                         return ret;
10681                 }
10682                 reset_try_times = 0;
10683         }
10684         return 0;
10685 }
10686
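/* Reset all of the handle's queues with a single RCB reset command. If the
 * firmware reports that the command is not supported, fall back to resetting
 * each tqp individually via hclge_reset_tqp_cmd().
 */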
10687 static int hclge_reset_rcb(struct hnae3_handle *handle)
10688 {
10689 #define HCLGE_RESET_RCB_NOT_SUPPORT     0U
10690 #define HCLGE_RESET_RCB_SUCCESS         1U
10691
10692         struct hclge_vport *vport = hclge_get_vport(handle);
10693         struct hclge_dev *hdev = vport->back;
10694         struct hclge_reset_cmd *req;
10695         struct hclge_desc desc;
10696         u8 return_status;
10697         u16 queue_gid;
10698         int ret;
10699
10700         queue_gid = hclge_covert_handle_qid_global(handle, 0);
10701
10702         req = (struct hclge_reset_cmd *)desc.data;
10703         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10704         hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10705         req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10706         req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10707
10708         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10709         if (ret) {
10710                 dev_err(&hdev->pdev->dev,
10711                         "failed to send rcb reset cmd, ret = %d\n", ret);
10712                 return ret;
10713         }
10714
10715         return_status = req->fun_reset_rcb_return_status;
10716         if (return_status == HCLGE_RESET_RCB_SUCCESS)
10717                 return 0;
10718
10719         if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10720                 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10721                         return_status);
10722                 return -EIO;
10723         }
10724
10725         /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10726          * again to reset all tqps
10727          */
10728         return hclge_reset_tqp_cmd(handle);
10729 }
10730
10731 int hclge_reset_tqp(struct hnae3_handle *handle)
10732 {
10733         struct hclge_vport *vport = hclge_get_vport(handle);
10734         struct hclge_dev *hdev = vport->back;
10735         int ret;
10736
10737         /* only need to disable PF's tqp */
10738         if (!vport->vport_id) {
10739                 ret = hclge_tqp_enable(handle, false);
10740                 if (ret) {
10741                         dev_err(&hdev->pdev->dev,
10742                                 "failed to disable tqp, ret = %d\n", ret);
10743                         return ret;
10744                 }
10745         }
10746
10747         return hclge_reset_rcb(handle);
10748 }
10749
10750 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10751 {
10752         struct hclge_vport *vport = hclge_get_vport(handle);
10753         struct hclge_dev *hdev = vport->back;
10754
10755         return hdev->fw_version;
10756 }
10757
10758 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10759 {
10760         struct phy_device *phydev = hdev->hw.mac.phydev;
10761
10762         if (!phydev)
10763                 return;
10764
10765         phy_set_asym_pause(phydev, rx_en, tx_en);
10766 }
10767
10768 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10769 {
10770         int ret;
10771
10772         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10773                 return 0;
10774
10775         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10776         if (ret)
10777                 dev_err(&hdev->pdev->dev,
10778                         "configure pauseparam error, ret = %d.\n", ret);
10779
10780         return ret;
10781 }
10782
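/* Resolve the pause configuration negotiated by the external PHY: combine the
 * local and link partner advertisements with mii_resolve_flowctrl_fdx(),
 * force pause off for half duplex, and program the MAC pause parameters.
 * Does nothing when the link is down or autoneg is off.
 */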
10783 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10784 {
10785         struct phy_device *phydev = hdev->hw.mac.phydev;
10786         u16 remote_advertising = 0;
10787         u16 local_advertising;
10788         u32 rx_pause, tx_pause;
10789         u8 flowctl;
10790
10791         if (!phydev->link || !phydev->autoneg)
10792                 return 0;
10793
10794         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10795
10796         if (phydev->pause)
10797                 remote_advertising = LPA_PAUSE_CAP;
10798
10799         if (phydev->asym_pause)
10800                 remote_advertising |= LPA_PAUSE_ASYM;
10801
10802         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10803                                            remote_advertising);
10804         tx_pause = flowctl & FLOW_CTRL_TX;
10805         rx_pause = flowctl & FLOW_CTRL_RX;
10806
10807         if (phydev->duplex == HCLGE_MAC_HALF) {
10808                 tx_pause = 0;
10809                 rx_pause = 0;
10810         }
10811
10812         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10813 }
10814
10815 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10816                                  u32 *rx_en, u32 *tx_en)
10817 {
10818         struct hclge_vport *vport = hclge_get_vport(handle);
10819         struct hclge_dev *hdev = vport->back;
10820         u8 media_type = hdev->hw.mac.media_type;
10821
10822         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10823                     hclge_get_autoneg(handle) : 0;
10824
10825         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10826                 *rx_en = 0;
10827                 *tx_en = 0;
10828                 return;
10829         }
10830
10831         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10832                 *rx_en = 1;
10833                 *tx_en = 0;
10834         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10835                 *tx_en = 1;
10836                 *rx_en = 0;
10837         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10838                 *rx_en = 1;
10839                 *tx_en = 1;
10840         } else {
10841                 *rx_en = 0;
10842                 *tx_en = 0;
10843         }
10844 }
10845
10846 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10847                                          u32 rx_en, u32 tx_en)
10848 {
10849         if (rx_en && tx_en)
10850                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10851         else if (rx_en && !tx_en)
10852                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10853         else if (!rx_en && tx_en)
10854                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10855         else
10856                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10857
10858         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10859 }
10860
10861 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10862                                 u32 rx_en, u32 tx_en)
10863 {
10864         struct hclge_vport *vport = hclge_get_vport(handle);
10865         struct hclge_dev *hdev = vport->back;
10866         struct phy_device *phydev = hdev->hw.mac.phydev;
10867         u32 fc_autoneg;
10868
10869         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10870                 fc_autoneg = hclge_get_autoneg(handle);
10871                 if (auto_neg != fc_autoneg) {
10872                         dev_info(&hdev->pdev->dev,
10873                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10874                         return -EOPNOTSUPP;
10875                 }
10876         }
10877
10878         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10879                 dev_info(&hdev->pdev->dev,
10880                          "Priority flow control enabled. Cannot set link flow control.\n");
10881                 return -EOPNOTSUPP;
10882         }
10883
10884         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10885
10886         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10887
10888         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10889                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10890
10891         if (phydev)
10892                 return phy_start_aneg(phydev);
10893
10894         return -EOPNOTSUPP;
10895 }
10896
10897 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10898                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10899 {
10900         struct hclge_vport *vport = hclge_get_vport(handle);
10901         struct hclge_dev *hdev = vport->back;
10902
10903         if (speed)
10904                 *speed = hdev->hw.mac.speed;
10905         if (duplex)
10906                 *duplex = hdev->hw.mac.duplex;
10907         if (auto_neg)
10908                 *auto_neg = hdev->hw.mac.autoneg;
10909 }
10910
10911 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10912                                  u8 *module_type)
10913 {
10914         struct hclge_vport *vport = hclge_get_vport(handle);
10915         struct hclge_dev *hdev = vport->back;
10916
10917         /* When the nic is down, the service task is not running and does not
10918          * update the port information every second. Query the port information
10919          * before returning the media type to ensure it is up to date.
10920          */
10921         hclge_update_port_info(hdev);
10922
10923         if (media_type)
10924                 *media_type = hdev->hw.mac.media_type;
10925
10926         if (module_type)
10927                 *module_type = hdev->hw.mac.module_type;
10928 }
10929
10930 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10931                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10932 {
10933         struct hclge_vport *vport = hclge_get_vport(handle);
10934         struct hclge_dev *hdev = vport->back;
10935         struct phy_device *phydev = hdev->hw.mac.phydev;
10936         int mdix_ctrl, mdix, is_resolved;
10937         unsigned int retval;
10938
10939         if (!phydev) {
10940                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10941                 *tp_mdix = ETH_TP_MDI_INVALID;
10942                 return;
10943         }
10944
10945         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10946
10947         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10948         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10949                                     HCLGE_PHY_MDIX_CTRL_S);
10950
10951         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10952         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10953         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10954
10955         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10956
10957         switch (mdix_ctrl) {
10958         case 0x0:
10959                 *tp_mdix_ctrl = ETH_TP_MDI;
10960                 break;
10961         case 0x1:
10962                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10963                 break;
10964         case 0x3:
10965                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10966                 break;
10967         default:
10968                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10969                 break;
10970         }
10971
10972         if (!is_resolved)
10973                 *tp_mdix = ETH_TP_MDI_INVALID;
10974         else if (mdix)
10975                 *tp_mdix = ETH_TP_MDI_X;
10976         else
10977                 *tp_mdix = ETH_TP_MDI;
10978 }
10979
10980 static void hclge_info_show(struct hclge_dev *hdev)
10981 {
10982         struct device *dev = &hdev->pdev->dev;
10983
10984         dev_info(dev, "PF info begin:\n");
10985
10986         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10987         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10988         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10989         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10990         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10991         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10992         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10993         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10994         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10995         dev_info(dev, "This is %s PF\n",
10996                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10997         dev_info(dev, "DCB %s\n",
10998                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10999         dev_info(dev, "MQPRIO %s\n",
11000                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11001
11002         dev_info(dev, "PF info end.\n");
11003 }
11004
11005 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11006                                           struct hclge_vport *vport)
11007 {
11008         struct hnae3_client *client = vport->nic.client;
11009         struct hclge_dev *hdev = ae_dev->priv;
11010         int rst_cnt = hdev->rst_stats.reset_cnt;
11011         int ret;
11012
11013         ret = client->ops->init_instance(&vport->nic);
11014         if (ret)
11015                 return ret;
11016
11017         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11018         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11019             rst_cnt != hdev->rst_stats.reset_cnt) {
11020                 ret = -EBUSY;
11021                 goto init_nic_err;
11022         }
11023
11024         /* Enable nic hw error interrupts */
11025         ret = hclge_config_nic_hw_error(hdev, true);
11026         if (ret) {
11027                 dev_err(&ae_dev->pdev->dev,
11028                         "fail(%d) to enable hw error interrupts\n", ret);
11029                 goto init_nic_err;
11030         }
11031
11032         hnae3_set_client_init_flag(client, ae_dev, 1);
11033
11034         if (netif_msg_drv(&hdev->vport->nic))
11035                 hclge_info_show(hdev);
11036
11037         return ret;
11038
11039 init_nic_err:
11040         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11041         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11042                 msleep(HCLGE_WAIT_RESET_DONE);
11043
11044         client->ops->uninit_instance(&vport->nic, 0);
11045
11046         return ret;
11047 }
11048
11049 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11050                                            struct hclge_vport *vport)
11051 {
11052         struct hclge_dev *hdev = ae_dev->priv;
11053         struct hnae3_client *client;
11054         int rst_cnt;
11055         int ret;
11056
11057         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11058             !hdev->nic_client)
11059                 return 0;
11060
11061         client = hdev->roce_client;
11062         ret = hclge_init_roce_base_info(vport);
11063         if (ret)
11064                 return ret;
11065
11066         rst_cnt = hdev->rst_stats.reset_cnt;
11067         ret = client->ops->init_instance(&vport->roce);
11068         if (ret)
11069                 return ret;
11070
11071         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11072         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11073             rst_cnt != hdev->rst_stats.reset_cnt) {
11074                 ret = -EBUSY;
11075                 goto init_roce_err;
11076         }
11077
11078         /* Enable roce ras interrupts */
11079         ret = hclge_config_rocee_ras_interrupt(hdev, true);
11080         if (ret) {
11081                 dev_err(&ae_dev->pdev->dev,
11082                         "fail(%d) to enable roce ras interrupts\n", ret);
11083                 goto init_roce_err;
11084         }
11085
11086         hnae3_set_client_init_flag(client, ae_dev, 1);
11087
11088         return 0;
11089
11090 init_roce_err:
11091         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11092         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11093                 msleep(HCLGE_WAIT_RESET_DONE);
11094
11095         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11096
11097         return ret;
11098 }
11099
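/* Entry point for hnae3 client registration: bind the client to vport 0 and
 * initialize the NIC and/or RoCE instances depending on the client type.
 */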
11100 static int hclge_init_client_instance(struct hnae3_client *client,
11101                                       struct hnae3_ae_dev *ae_dev)
11102 {
11103         struct hclge_dev *hdev = ae_dev->priv;
11104         struct hclge_vport *vport = &hdev->vport[0];
11105         int ret;
11106
11107         switch (client->type) {
11108         case HNAE3_CLIENT_KNIC:
11109                 hdev->nic_client = client;
11110                 vport->nic.client = client;
11111                 ret = hclge_init_nic_client_instance(ae_dev, vport);
11112                 if (ret)
11113                         goto clear_nic;
11114
11115                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11116                 if (ret)
11117                         goto clear_roce;
11118
11119                 break;
11120         case HNAE3_CLIENT_ROCE:
11121                 if (hnae3_dev_roce_supported(hdev)) {
11122                         hdev->roce_client = client;
11123                         vport->roce.client = client;
11124                 }
11125
11126                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11127                 if (ret)
11128                         goto clear_roce;
11129
11130                 break;
11131         default:
11132                 return -EINVAL;
11133         }
11134
11135         return 0;
11136
11137 clear_nic:
11138         hdev->nic_client = NULL;
11139         vport->nic.client = NULL;
11140         return ret;
11141 clear_roce:
11142         hdev->roce_client = NULL;
11143         vport->roce.client = NULL;
11144         return ret;
11145 }
11146
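/* Tear down the client instances bound to vport 0, waiting for any reset in
 * progress to complete before calling the client's uninit_instance hook.
 */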
11147 static void hclge_uninit_client_instance(struct hnae3_client *client,
11148                                          struct hnae3_ae_dev *ae_dev)
11149 {
11150         struct hclge_dev *hdev = ae_dev->priv;
11151         struct hclge_vport *vport = &hdev->vport[0];
11152
11153         if (hdev->roce_client) {
11154                 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11155                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11156                         msleep(HCLGE_WAIT_RESET_DONE);
11157
11158                 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11159                 hdev->roce_client = NULL;
11160                 vport->roce.client = NULL;
11161         }
11162         if (client->type == HNAE3_CLIENT_ROCE)
11163                 return;
11164         if (hdev->nic_client && client->ops->uninit_instance) {
11165                 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11166                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11167                         msleep(HCLGE_WAIT_RESET_DONE);
11168
11169                 client->ops->uninit_instance(&vport->nic, 0);
11170                 hdev->nic_client = NULL;
11171                 vport->nic.client = NULL;
11172         }
11173 }
11174
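/* Map the optional device memory BAR (BAR 4) with write-combining. Returns 0
 * without mapping anything when the device does not expose that BAR.
 */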
11175 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11176 {
11177 #define HCLGE_MEM_BAR           4
11178
11179         struct pci_dev *pdev = hdev->pdev;
11180         struct hclge_hw *hw = &hdev->hw;
11181
11182         /* for device does not have device memory, return directly */
11183         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11184                 return 0;
11185
11186         hw->mem_base = devm_ioremap_wc(&pdev->dev,
11187                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
11188                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
11189         if (!hw->mem_base) {
11190                 dev_err(&pdev->dev, "failed to map device memory\n");
11191                 return -EFAULT;
11192         }
11193
11194         return 0;
11195 }
11196
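/* Enable and map the PCI device: set the DMA mask (64-bit with a 32-bit
 * fallback), request regions, map the configuration register space (BAR 2)
 * and the optional device memory BAR, and record the number of VFs supported.
 */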
11197 static int hclge_pci_init(struct hclge_dev *hdev)
11198 {
11199         struct pci_dev *pdev = hdev->pdev;
11200         struct hclge_hw *hw;
11201         int ret;
11202
11203         ret = pci_enable_device(pdev);
11204         if (ret) {
11205                 dev_err(&pdev->dev, "failed to enable PCI device\n");
11206                 return ret;
11207         }
11208
11209         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11210         if (ret) {
11211                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11212                 if (ret) {
11213                         dev_err(&pdev->dev,
11214                                 "can't set consistent PCI DMA");
11215                         goto err_disable_device;
11216                 }
11217                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11218         }
11219
11220         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11221         if (ret) {
11222                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11223                 goto err_disable_device;
11224         }
11225
11226         pci_set_master(pdev);
11227         hw = &hdev->hw;
11228         hw->io_base = pcim_iomap(pdev, 2, 0);
11229         if (!hw->io_base) {
11230                 dev_err(&pdev->dev, "Can't map configuration register space\n");
11231                 ret = -ENOMEM;
11232                 goto err_clr_master;
11233         }
11234
11235         ret = hclge_dev_mem_map(hdev);
11236         if (ret)
11237                 goto err_unmap_io_base;
11238
11239         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11240
11241         return 0;
11242
11243 err_unmap_io_base:
11244         pcim_iounmap(pdev, hdev->hw.io_base);
11245 err_clr_master:
11246         pci_clear_master(pdev);
11247         pci_release_regions(pdev);
11248 err_disable_device:
11249         pci_disable_device(pdev);
11250
11251         return ret;
11252 }
11253
11254 static void hclge_pci_uninit(struct hclge_dev *hdev)
11255 {
11256         struct pci_dev *pdev = hdev->pdev;
11257
11258         if (hdev->hw.mem_base)
11259                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11260
11261         pcim_iounmap(pdev, hdev->hw.io_base);
11262         pci_free_irq_vectors(pdev);
11263         pci_clear_master(pdev);
11264         pci_release_mem_regions(pdev);
11265         pci_disable_device(pdev);
11266 }
11267
11268 static void hclge_state_init(struct hclge_dev *hdev)
11269 {
11270         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11271         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11272         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11273         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11274         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11275         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11276         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11277 }
11278
11279 static void hclge_state_uninit(struct hclge_dev *hdev)
11280 {
11281         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11282         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11283
11284         if (hdev->reset_timer.function)
11285                 del_timer_sync(&hdev->reset_timer);
11286         if (hdev->service_task.work.func)
11287                 cancel_delayed_work_sync(&hdev->service_task);
11288 }
11289
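/* Prepare the device for a reset requested through the hnae3 framework
 * (e.g. FLR). Retries the prepare step if it fails or another reset is
 * pending, then disables the misc vector and command sending until the reset
 * completes and hclge_reset_done() is called.
 */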
11290 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11291                                         enum hnae3_reset_type rst_type)
11292 {
11293 #define HCLGE_RESET_RETRY_WAIT_MS       500
11294 #define HCLGE_RESET_RETRY_CNT   5
11295
11296         struct hclge_dev *hdev = ae_dev->priv;
11297         int retry_cnt = 0;
11298         int ret;
11299
11300 retry:
11301         down(&hdev->reset_sem);
11302         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11303         hdev->reset_type = rst_type;
11304         ret = hclge_reset_prepare(hdev);
11305         if (ret || hdev->reset_pending) {
11306                 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11307                         ret);
11308                 if (hdev->reset_pending ||
11309                     retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11310                         dev_err(&hdev->pdev->dev,
11311                                 "reset_pending:0x%lx, retry_cnt:%d\n",
11312                                 hdev->reset_pending, retry_cnt);
11313                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11314                         up(&hdev->reset_sem);
11315                         msleep(HCLGE_RESET_RETRY_WAIT_MS);
11316                         goto retry;
11317                 }
11318         }
11319
11320         /* disable the misc vector before the reset is done */
11321         hclge_enable_vector(&hdev->misc_vector, false);
11322         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11323
11324         if (hdev->reset_type == HNAE3_FLR_RESET)
11325                 hdev->rst_stats.flr_rst_cnt++;
11326 }
11327
11328 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11329 {
11330         struct hclge_dev *hdev = ae_dev->priv;
11331         int ret;
11332
11333         hclge_enable_vector(&hdev->misc_vector, true);
11334
11335         ret = hclge_reset_rebuild(hdev);
11336         if (ret)
11337                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11338
11339         hdev->reset_type = HNAE3_NONE_RESET;
11340         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11341         up(&hdev->reset_sem);
11342 }
11343
11344 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11345 {
11346         u16 i;
11347
11348         for (i = 0; i < hdev->num_alloc_vport; i++) {
11349                 struct hclge_vport *vport = &hdev->vport[i];
11350                 int ret;
11351
11352                 /* Send cmd to clear VF's FUNC_RST_ING */
11353                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11354                 if (ret)
11355                         dev_warn(&hdev->pdev->dev,
11356                                  "clear vf(%u) rst failed %d!\n",
11357                                  vport->vport_id, ret);
11358         }
11359 }
11360
11361 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11362 {
11363         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11364                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11365 }
11366
11367 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11368 {
11369         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11370                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11371 }
11372
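/* Main initialization path for the PF ae_dev: allocate the driver context,
 * bring up PCI/command-queue/interrupt resources, configure MAC, VLAN, TM,
 * RSS and flow director, and finally schedule the periodic service task.
 */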
11373 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11374 {
11375         struct pci_dev *pdev = ae_dev->pdev;
11376         struct hclge_dev *hdev;
11377         int ret;
11378
11379         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11380         if (!hdev)
11381                 return -ENOMEM;
11382
11383         hdev->pdev = pdev;
11384         hdev->ae_dev = ae_dev;
11385         hdev->reset_type = HNAE3_NONE_RESET;
11386         hdev->reset_level = HNAE3_FUNC_RESET;
11387         ae_dev->priv = hdev;
11388
11389         /* HW supports 2-layer VLAN */
11390         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11391
11392         mutex_init(&hdev->vport_lock);
11393         spin_lock_init(&hdev->fd_rule_lock);
11394         sema_init(&hdev->reset_sem, 1);
11395
11396         ret = hclge_pci_init(hdev);
11397         if (ret)
11398                 goto out;
11399
11400         /* Initialize the firmware command queue */
11401         ret = hclge_cmd_queue_init(hdev);
11402         if (ret)
11403                 goto err_pci_uninit;
11404
11405         /* Initialize the firmware command interface */
11406         ret = hclge_cmd_init(hdev);
11407         if (ret)
11408                 goto err_cmd_uninit;
11409
11410         ret = hclge_get_cap(hdev);
11411         if (ret)
11412                 goto err_cmd_uninit;
11413
11414         ret = hclge_query_dev_specs(hdev);
11415         if (ret) {
11416                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11417                         ret);
11418                 goto err_cmd_uninit;
11419         }
11420
11421         ret = hclge_configure(hdev);
11422         if (ret) {
11423                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11424                 goto err_cmd_uninit;
11425         }
11426
11427         ret = hclge_init_msi(hdev);
11428         if (ret) {
11429                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11430                 goto err_cmd_uninit;
11431         }
11432
11433         ret = hclge_misc_irq_init(hdev);
11434         if (ret)
11435                 goto err_msi_uninit;
11436
11437         ret = hclge_alloc_tqps(hdev);
11438         if (ret) {
11439                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11440                 goto err_msi_irq_uninit;
11441         }
11442
11443         ret = hclge_alloc_vport(hdev);
11444         if (ret)
11445                 goto err_msi_irq_uninit;
11446
11447         ret = hclge_map_tqp(hdev);
11448         if (ret)
11449                 goto err_msi_irq_uninit;
11450
11451         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11452             !hnae3_dev_phy_imp_supported(hdev)) {
11453                 ret = hclge_mac_mdio_config(hdev);
11454                 if (ret)
11455                         goto err_msi_irq_uninit;
11456         }
11457
11458         ret = hclge_init_umv_space(hdev);
11459         if (ret)
11460                 goto err_mdiobus_unreg;
11461
11462         ret = hclge_mac_init(hdev);
11463         if (ret) {
11464                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11465                 goto err_mdiobus_unreg;
11466         }
11467
11468         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11469         if (ret) {
11470                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11471                 goto err_mdiobus_unreg;
11472         }
11473
11474         ret = hclge_config_gro(hdev, true);
11475         if (ret)
11476                 goto err_mdiobus_unreg;
11477
11478         ret = hclge_init_vlan_config(hdev);
11479         if (ret) {
11480                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11481                 goto err_mdiobus_unreg;
11482         }
11483
11484         ret = hclge_tm_schd_init(hdev);
11485         if (ret) {
11486                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11487                 goto err_mdiobus_unreg;
11488         }
11489
11490         ret = hclge_rss_init_cfg(hdev);
11491         if (ret) {
11492                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11493                 goto err_mdiobus_unreg;
11494         }
11495
11496         ret = hclge_rss_init_hw(hdev);
11497         if (ret) {
11498                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11499                 goto err_mdiobus_unreg;
11500         }
11501
11502         ret = init_mgr_tbl(hdev);
11503         if (ret) {
11504                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11505                 goto err_mdiobus_unreg;
11506         }
11507
11508         ret = hclge_init_fd_config(hdev);
11509         if (ret) {
11510                 dev_err(&pdev->dev,
11511                         "fd table init fail, ret=%d\n", ret);
11512                 goto err_mdiobus_unreg;
11513         }
11514
11515         INIT_KFIFO(hdev->mac_tnl_log);
11516
11517         hclge_dcb_ops_set(hdev);
11518
11519         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11520         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11521
11522         /* Set up affinity after the service timer setup because add_timer_on
11523          * is called in the affinity notify callback.
11524          */
11525         hclge_misc_affinity_setup(hdev);
11526
11527         hclge_clear_all_event_cause(hdev);
11528         hclge_clear_resetting_state(hdev);
11529
11530         /* Log and clear the hw errors that have already occurred */
11531         hclge_handle_all_hns_hw_errors(ae_dev);
11532
11533         /* Request a delayed reset for error recovery, because an immediate
11534          * global reset on a PF may affect the pending initialization of other PFs
11535          */
11536         if (ae_dev->hw_err_reset_req) {
11537                 enum hnae3_reset_type reset_level;
11538
11539                 reset_level = hclge_get_reset_level(ae_dev,
11540                                                     &ae_dev->hw_err_reset_req);
11541                 hclge_set_def_reset_request(ae_dev, reset_level);
11542                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11543         }
11544
11545         hclge_init_rxd_adv_layout(hdev);
11546
11547         /* Enable MISC vector(vector0) */
11548         hclge_enable_vector(&hdev->misc_vector, true);
11549
11550         hclge_state_init(hdev);
11551         hdev->last_reset_time = jiffies;
11552
11553         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11554                  HCLGE_DRIVER_NAME);
11555
11556         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11557
11558         return 0;
11559
11560 err_mdiobus_unreg:
11561         if (hdev->hw.mac.phydev)
11562                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11563 err_msi_irq_uninit:
11564         hclge_misc_irq_uninit(hdev);
11565 err_msi_uninit:
11566         pci_free_irq_vectors(pdev);
11567 err_cmd_uninit:
11568         hclge_cmd_uninit(hdev);
11569 err_pci_uninit:
11570         pcim_iounmap(pdev, hdev->hw.io_base);
11571         pci_clear_master(pdev);
11572         pci_release_regions(pdev);
11573         pci_disable_device(pdev);
11574 out:
11575         mutex_destroy(&hdev->vport_lock);
11576         return ret;
11577 }
11578
11579 static void hclge_stats_clear(struct hclge_dev *hdev)
11580 {
11581         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11582 }
11583
11584 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11585 {
11586         return hclge_config_switch_param(hdev, vf, enable,
11587                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
11588 }
11589
11590 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11591 {
11592         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11593                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
11594                                           enable, vf);
11595 }
11596
11597 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11598 {
11599         int ret;
11600
11601         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11602         if (ret) {
11603                 dev_err(&hdev->pdev->dev,
11604                         "Set vf %d mac spoof check %s failed, ret=%d\n",
11605                         vf, enable ? "on" : "off", ret);
11606                 return ret;
11607         }
11608
11609         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11610         if (ret)
11611                 dev_err(&hdev->pdev->dev,
11612                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11613                         vf, enable ? "on" : "off", ret);
11614
11615         return ret;
11616 }
11617
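/* Enable or disable MAC and VLAN spoof checking for a VF. Warns when the
 * VF's VLAN or MAC table is already full, since enabling spoof checking may
 * then cause the VF's own packets to be dropped.
 */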
11618 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11619                                  bool enable)
11620 {
11621         struct hclge_vport *vport = hclge_get_vport(handle);
11622         struct hclge_dev *hdev = vport->back;
11623         u32 new_spoofchk = enable ? 1 : 0;
11624         int ret;
11625
11626         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11627                 return -EOPNOTSUPP;
11628
11629         vport = hclge_get_vf_vport(hdev, vf);
11630         if (!vport)
11631                 return -EINVAL;
11632
11633         if (vport->vf_info.spoofchk == new_spoofchk)
11634                 return 0;
11635
11636         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11637                 dev_warn(&hdev->pdev->dev,
11638                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11639                          vf);
11640         else if (enable && hclge_is_umv_space_full(vport, true))
11641                 dev_warn(&hdev->pdev->dev,
11642                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11643                          vf);
11644
11645         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11646         if (ret)
11647                 return ret;
11648
11649         vport->vf_info.spoofchk = new_spoofchk;
11650         return 0;
11651 }
11652
11653 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11654 {
11655         struct hclge_vport *vport = hdev->vport;
11656         int ret;
11657         int i;
11658
11659         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11660                 return 0;
11661
11662         /* resume the vf spoof check state after reset */
11663         for (i = 0; i < hdev->num_alloc_vport; i++) {
11664                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11665                                                vport->vf_info.spoofchk);
11666                 if (ret)
11667                         return ret;
11668
11669                 vport++;
11670         }
11671
11672         return 0;
11673 }
11674
11675 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11676 {
11677         struct hclge_vport *vport = hclge_get_vport(handle);
11678         struct hclge_dev *hdev = vport->back;
11679         u32 new_trusted = enable ? 1 : 0;
11680
11681         vport = hclge_get_vf_vport(hdev, vf);
11682         if (!vport)
11683                 return -EINVAL;
11684
11685         if (vport->vf_info.trusted == new_trusted)
11686                 return 0;
11687
11688         vport->vf_info.trusted = new_trusted;
11689         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11690         hclge_task_schedule(hdev, 0);
11691
11692         return 0;
11693 }
11694
11695 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11696 {
11697         int ret;
11698         int vf;
11699
11700         /* reset vf rate to default value */
11701         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11702                 struct hclge_vport *vport = &hdev->vport[vf];
11703
11704                 vport->vf_info.max_tx_rate = 0;
11705                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11706                 if (ret)
11707                         dev_err(&hdev->pdev->dev,
11708                                 "vf%d failed to reset to default, ret=%d\n",
11709                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11710         }
11711 }
11712
11713 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11714                                      int min_tx_rate, int max_tx_rate)
11715 {
11716         if (min_tx_rate != 0 ||
11717             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11718                 dev_err(&hdev->pdev->dev,
11719                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11720                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11721                 return -EINVAL;
11722         }
11723
11724         return 0;
11725 }
11726
11727 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11728                              int min_tx_rate, int max_tx_rate, bool force)
11729 {
11730         struct hclge_vport *vport = hclge_get_vport(handle);
11731         struct hclge_dev *hdev = vport->back;
11732         int ret;
11733
11734         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11735         if (ret)
11736                 return ret;
11737
11738         vport = hclge_get_vf_vport(hdev, vf);
11739         if (!vport)
11740                 return -EINVAL;
11741
11742         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11743                 return 0;
11744
11745         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11746         if (ret)
11747                 return ret;
11748
11749         vport->vf_info.max_tx_rate = max_tx_rate;
11750
11751         return 0;
11752 }
11753
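/* Restore each VF's configured max_tx_rate after a reset. A stored rate of
 * zero means unlimited, which the firmware already applies by default, so
 * such VFs are skipped.
 */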
11754 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11755 {
11756         struct hnae3_handle *handle = &hdev->vport->nic;
11757         struct hclge_vport *vport;
11758         int ret;
11759         int vf;
11760
11761         /* resume the vf max_tx_rate after reset */
11762         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11763                 vport = hclge_get_vf_vport(hdev, vf);
11764                 if (!vport)
11765                         return -EINVAL;
11766
11767                 /* zero means max rate; after reset, the firmware has already
11768                  * set it to max rate, so just continue.
11769                  */
11770                 if (!vport->vf_info.max_tx_rate)
11771                         continue;
11772
11773                 ret = hclge_set_vf_rate(handle, vf, 0,
11774                                         vport->vf_info.max_tx_rate, true);
11775                 if (ret) {
11776                         dev_err(&hdev->pdev->dev,
11777                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11778                                 vf, vport->vf_info.max_tx_rate, ret);
11779                         return ret;
11780                 }
11781         }
11782
11783         return 0;
11784 }
11785
11786 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11787 {
11788         struct hclge_vport *vport = hdev->vport;
11789         int i;
11790
11791         for (i = 0; i < hdev->num_alloc_vport; i++) {
11792                 hclge_vport_stop(vport);
11793                 vport++;
11794         }
11795 }
11796
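/* Re-initialize the hardware after a reset. Unlike hclge_init_ae_dev(), the
 * driver context already exists, so only the hardware state (command queue,
 * MAC, VLAN, TM, RSS, flow director, error interrupts) is rebuilt, and the
 * per-VF spoof-check and rate settings are restored.
 */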
11797 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11798 {
11799         struct hclge_dev *hdev = ae_dev->priv;
11800         struct pci_dev *pdev = ae_dev->pdev;
11801         int ret;
11802
11803         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11804
11805         hclge_stats_clear(hdev);
11806         /* NOTE: PF reset does not need to clear or restore the PF and VF table
11807          * entries, so the tables in memory should not be cleaned here.
11808          */
11809         if (hdev->reset_type == HNAE3_IMP_RESET ||
11810             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11811                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11812                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11813                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11814                 hclge_reset_umv_space(hdev);
11815         }
11816
11817         ret = hclge_cmd_init(hdev);
11818         if (ret) {
11819                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11820                 return ret;
11821         }
11822
11823         ret = hclge_map_tqp(hdev);
11824         if (ret) {
11825                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11826                 return ret;
11827         }
11828
11829         ret = hclge_mac_init(hdev);
11830         if (ret) {
11831                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11832                 return ret;
11833         }
11834
11835         ret = hclge_tp_port_init(hdev);
11836         if (ret) {
11837                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11838                         ret);
11839                 return ret;
11840         }
11841
11842         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11843         if (ret) {
11844                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11845                 return ret;
11846         }
11847
11848         ret = hclge_config_gro(hdev, true);
11849         if (ret)
11850                 return ret;
11851
11852         ret = hclge_init_vlan_config(hdev);
11853         if (ret) {
11854                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11855                 return ret;
11856         }
11857
11858         ret = hclge_tm_init_hw(hdev, true);
11859         if (ret) {
11860                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11861                 return ret;
11862         }
11863
11864         ret = hclge_rss_init_hw(hdev);
11865         if (ret) {
11866                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11867                 return ret;
11868         }
11869
11870         ret = init_mgr_tbl(hdev);
11871         if (ret) {
11872                 dev_err(&pdev->dev,
11873                         "failed to reinit manager table, ret = %d\n", ret);
11874                 return ret;
11875         }
11876
11877         ret = hclge_init_fd_config(hdev);
11878         if (ret) {
11879                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11880                 return ret;
11881         }
11882
11883         /* Log and clear the hw errors that have already occurred */
11884         hclge_handle_all_hns_hw_errors(ae_dev);
11885
11886         /* Re-enable the hw error interrupts because
11887          * the interrupts get disabled on global reset.
11888          */
11889         ret = hclge_config_nic_hw_error(hdev, true);
11890         if (ret) {
11891                 dev_err(&pdev->dev,
11892                         "fail(%d) to re-enable NIC hw error interrupts\n",
11893                         ret);
11894                 return ret;
11895         }
11896
11897         if (hdev->roce_client) {
11898                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11899                 if (ret) {
11900                         dev_err(&pdev->dev,
11901                                 "fail(%d) to re-enable roce ras interrupts\n",
11902                                 ret);
11903                         return ret;
11904                 }
11905         }
11906
11907         hclge_reset_vport_state(hdev);
11908         ret = hclge_reset_vport_spoofchk(hdev);
11909         if (ret)
11910                 return ret;
11911
11912         ret = hclge_resume_vf_rate(hdev);
11913         if (ret)
11914                 return ret;
11915
11916         hclge_init_rxd_adv_layout(hdev);
11917
11918         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11919                  HCLGE_DRIVER_NAME);
11920
11921         return 0;
11922 }
11923
11924 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11925 {
11926         struct hclge_dev *hdev = ae_dev->priv;
11927         struct hclge_mac *mac = &hdev->hw.mac;
11928
11929         hclge_reset_vf_rate(hdev);
11930         hclge_clear_vf_vlan(hdev);
11931         hclge_misc_affinity_teardown(hdev);
11932         hclge_state_uninit(hdev);
11933         hclge_uninit_rxd_adv_layout(hdev);
11934         hclge_uninit_mac_table(hdev);
11935         hclge_del_all_fd_entries(hdev);
11936
11937         if (mac->phydev)
11938                 mdiobus_unregister(mac->mdio_bus);
11939
11940         /* Disable MISC vector(vector0) */
11941         hclge_enable_vector(&hdev->misc_vector, false);
11942         synchronize_irq(hdev->misc_vector.vector_irq);
11943
11944         /* Disable all hw interrupts */
11945         hclge_config_mac_tnl_int(hdev, false);
11946         hclge_config_nic_hw_error(hdev, false);
11947         hclge_config_rocee_ras_interrupt(hdev, false);
11948
11949         hclge_cmd_uninit(hdev);
11950         hclge_misc_irq_uninit(hdev);
11951         hclge_pci_uninit(hdev);
11952         mutex_destroy(&hdev->vport_lock);
11953         hclge_uninit_vport_vlan_table(hdev);
11954         ae_dev->priv = NULL;
11955 }
11956
11957 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11958 {
11959         struct hclge_vport *vport = hclge_get_vport(handle);
11960         struct hclge_dev *hdev = vport->back;
11961
11962         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11963 }
11964
11965 static void hclge_get_channels(struct hnae3_handle *handle,
11966                                struct ethtool_channels *ch)
11967 {
11968         ch->max_combined = hclge_get_max_channels(handle);
11969         ch->other_count = 1;
11970         ch->max_other = 1;
11971         ch->combined_count = handle->kinfo.rss_size;
11972 }
11973
11974 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11975                                         u16 *alloc_tqps, u16 *max_rss_size)
11976 {
11977         struct hclge_vport *vport = hclge_get_vport(handle);
11978         struct hclge_dev *hdev = vport->back;
11979
11980         *alloc_tqps = vport->alloc_tqps;
11981         *max_rss_size = hdev->pf_rss_size_max;
11982 }
11983
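/* Change the number of channels (TQPs) used by the PF: update the TM vport
 * mapping, reprogram the RSS TC mode for the new rss_size, and rebuild the
 * RSS indirection table unless the user has configured it explicitly.
 */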
11984 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11985                               bool rxfh_configured)
11986 {
11987         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11988         struct hclge_vport *vport = hclge_get_vport(handle);
11989         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11990         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11991         struct hclge_dev *hdev = vport->back;
11992         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11993         u16 cur_rss_size = kinfo->rss_size;
11994         u16 cur_tqps = kinfo->num_tqps;
11995         u16 tc_valid[HCLGE_MAX_TC_NUM];
11996         u16 roundup_size;
11997         u32 *rss_indir;
11998         unsigned int i;
11999         int ret;
12000
12001         kinfo->req_rss_size = new_tqps_num;
12002
12003         ret = hclge_tm_vport_map_update(hdev);
12004         if (ret) {
12005                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12006                 return ret;
12007         }
12008
12009         roundup_size = roundup_pow_of_two(kinfo->rss_size);
12010         roundup_size = ilog2(roundup_size);
12011         /* Set the RSS TC mode according to the new RSS size */
12012         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12013                 tc_valid[i] = 0;
12014
12015                 if (!(hdev->hw_tc_map & BIT(i)))
12016                         continue;
12017
12018                 tc_valid[i] = 1;
12019                 tc_size[i] = roundup_size;
12020                 tc_offset[i] = kinfo->rss_size * i;
12021         }
12022         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12023         if (ret)
12024                 return ret;
12025
12026         /* RSS indirection table has been configured by user */
12027         if (rxfh_configured)
12028                 goto out;
12029
12030         /* Reinitialize the RSS indirection table according to the new RSS size */
12031         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12032                             GFP_KERNEL);
12033         if (!rss_indir)
12034                 return -ENOMEM;
12035
12036         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12037                 rss_indir[i] = i % kinfo->rss_size;
12038
12039         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12040         if (ret)
12041                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12042                         ret);
12043
12044         kfree(rss_indir);
12045
12046 out:
12047         if (!ret)
12048                 dev_info(&hdev->pdev->dev,
12049                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12050                          cur_rss_size, kinfo->rss_size,
12051                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12052
12053         return ret;
12054 }
12055
12056 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12057                               u32 *regs_num_64_bit)
12058 {
12059         struct hclge_desc desc;
12060         u32 total_num;
12061         int ret;
12062
12063         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12064         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12065         if (ret) {
12066                 dev_err(&hdev->pdev->dev,
12067                         "Query register number cmd failed, ret = %d.\n", ret);
12068                 return ret;
12069         }
12070
12071         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12072         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12073
12074         total_num = *regs_num_32_bit + *regs_num_64_bit;
12075         if (!total_num)
12076                 return -EINVAL;
12077
12078         return 0;
12079 }
12080
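/* Read 'regs_num' 32-bit registers via the firmware command queue. The first
 * descriptor holds HCLGE_32_BIT_DESC_NODATA_LEN fewer data words than the
 * rest, which is accounted for when computing the number of descriptors.
 */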
12081 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12082                                  void *data)
12083 {
12084 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12085 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12086
12087         struct hclge_desc *desc;
12088         u32 *reg_val = data;
12089         __le32 *desc_data;
12090         int nodata_num;
12091         int cmd_num;
12092         int i, k, n;
12093         int ret;
12094
12095         if (regs_num == 0)
12096                 return 0;
12097
12098         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12099         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12100                                HCLGE_32_BIT_REG_RTN_DATANUM);
12101         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12102         if (!desc)
12103                 return -ENOMEM;
12104
12105         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12106         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12107         if (ret) {
12108                 dev_err(&hdev->pdev->dev,
12109                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
12110                 kfree(desc);
12111                 return ret;
12112         }
12113
12114         for (i = 0; i < cmd_num; i++) {
12115                 if (i == 0) {
12116                         desc_data = (__le32 *)(&desc[i].data[0]);
12117                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12118                 } else {
12119                         desc_data = (__le32 *)(&desc[i]);
12120                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
12121                 }
12122                 for (k = 0; k < n; k++) {
12123                         *reg_val++ = le32_to_cpu(*desc_data++);
12124
12125                         regs_num--;
12126                         if (!regs_num)
12127                                 break;
12128                 }
12129         }
12130
12131         kfree(desc);
12132         return 0;
12133 }
12134
12135 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12136                                  void *data)
12137 {
12138 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12139 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12140
12141         struct hclge_desc *desc;
12142         u64 *reg_val = data;
12143         __le64 *desc_data;
12144         int nodata_len;
12145         int cmd_num;
12146         int i, k, n;
12147         int ret;
12148
12149         if (regs_num == 0)
12150                 return 0;
12151
12152         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12153         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12154                                HCLGE_64_BIT_REG_RTN_DATANUM);
12155         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12156         if (!desc)
12157                 return -ENOMEM;
12158
12159         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12160         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12161         if (ret) {
12162                 dev_err(&hdev->pdev->dev,
12163                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
12164                 kfree(desc);
12165                 return ret;
12166         }
12167
12168         for (i = 0; i < cmd_num; i++) {
12169                 if (i == 0) {
12170                         desc_data = (__le64 *)(&desc[i].data[0]);
12171                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12172                 } else {
12173                         desc_data = (__le64 *)(&desc[i]);
12174                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
12175                 }
12176                 for (k = 0; k < n; k++) {
12177                         *reg_val++ = le64_to_cpu(*desc_data++);
12178
12179                         regs_num--;
12180                         if (!regs_num)
12181                                 break;
12182                 }
12183         }
12184
12185         kfree(desc);
12186         return 0;
12187 }
12188
12189 #define MAX_SEPARATE_NUM        4
12190 #define SEPARATOR_VALUE         0xFDFCFBFA
12191 #define REG_NUM_PER_LINE        4
12192 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
12193 #define REG_SEPARATOR_LINE      1
12194 #define REG_NUM_REMAIN_MASK     3
12195
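/* Query how many buffer descriptors each DFX register type needs. The result
 * is spread across HCLGE_GET_DFX_REG_TYPE_CNT chained descriptors.
 */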
12196 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12197 {
12198         int i;
12199
12200         /* initialize all command BDs except the last one */
12201         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12202                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12203                                            true);
12204                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12205         }
12206
12207         /* initialize the last command BD */
12208         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12209
12210         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12211 }
12212
12213 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12214                                     int *bd_num_list,
12215                                     u32 type_num)
12216 {
12217         u32 entries_per_desc, desc_index, index, offset, i;
12218         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12219         int ret;
12220
12221         ret = hclge_query_bd_num_cmd_send(hdev, desc);
12222         if (ret) {
12223                 dev_err(&hdev->pdev->dev,
12224                         "Get dfx bd num fail, status is %d.\n", ret);
12225                 return ret;
12226         }
12227
12228         entries_per_desc = ARRAY_SIZE(desc[0].data);
12229         for (i = 0; i < type_num; i++) {
12230                 offset = hclge_dfx_bd_offset_list[i];
12231                 index = offset % entries_per_desc;
12232                 desc_index = offset / entries_per_desc;
12233                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12234         }
12235
12236         return ret;
12237 }
12238
12239 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12240                                   struct hclge_desc *desc_src, int bd_num,
12241                                   enum hclge_opcode_type cmd)
12242 {
12243         struct hclge_desc *desc = desc_src;
12244         int i, ret;
12245
12246         hclge_cmd_setup_basic_desc(desc, cmd, true);
12247         for (i = 0; i < bd_num - 1; i++) {
12248                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12249                 desc++;
12250                 hclge_cmd_setup_basic_desc(desc, cmd, true);
12251         }
12252
12253         desc = desc_src;
12254         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12255         if (ret)
12256                 dev_err(&hdev->pdev->dev,
12257                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12258                         cmd, ret);
12259
12260         return ret;
12261 }
12262
12263 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12264                                     void *data)
12265 {
12266         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12267         struct hclge_desc *desc = desc_src;
12268         u32 *reg = data;
12269
12270         entries_per_desc = ARRAY_SIZE(desc->data);
12271         reg_num = entries_per_desc * bd_num;
12272         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12273         for (i = 0; i < reg_num; i++) {
12274                 index = i % entries_per_desc;
12275                 desc_index = i / entries_per_desc;
12276                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12277         }
12278         for (i = 0; i < separator_num; i++)
12279                 *reg++ = SEPARATOR_VALUE;
12280
12281         return reg_num + separator_num;
12282 }
12283
12284 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12285 {
12286         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12287         int data_len_per_desc, bd_num, i;
12288         int *bd_num_list;
12289         u32 data_len;
12290         int ret;
12291
12292         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12293         if (!bd_num_list)
12294                 return -ENOMEM;
12295
12296         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12297         if (ret) {
12298                 dev_err(&hdev->pdev->dev,
12299                         "Get dfx reg bd num fail, status is %d.\n", ret);
12300                 goto out;
12301         }
12302
12303         data_len_per_desc = sizeof_field(struct hclge_desc, data);
12304         *len = 0;
12305         for (i = 0; i < dfx_reg_type_num; i++) {
12306                 bd_num = bd_num_list[i];
12307                 data_len = data_len_per_desc * bd_num;
12308                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12309         }
12310
12311 out:
12312         kfree(bd_num_list);
12313         return ret;
12314 }
12315
12316 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12317 {
12318         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12319         int bd_num, bd_num_max, buf_len, i;
12320         struct hclge_desc *desc_src;
12321         int *bd_num_list;
12322         u32 *reg = data;
12323         int ret;
12324
12325         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12326         if (!bd_num_list)
12327                 return -ENOMEM;
12328
12329         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12330         if (ret) {
12331                 dev_err(&hdev->pdev->dev,
12332                         "Get dfx reg bd num fail, status is %d.\n", ret);
12333                 goto out;
12334         }
12335
12336         bd_num_max = bd_num_list[0];
12337         for (i = 1; i < dfx_reg_type_num; i++)
12338                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12339
12340         buf_len = sizeof(*desc_src) * bd_num_max;
12341         desc_src = kzalloc(buf_len, GFP_KERNEL);
12342         if (!desc_src) {
12343                 ret = -ENOMEM;
12344                 goto out;
12345         }
12346
12347         for (i = 0; i < dfx_reg_type_num; i++) {
12348                 bd_num = bd_num_list[i];
12349                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12350                                              hclge_dfx_reg_opcode_list[i]);
12351                 if (ret) {
12352                         dev_err(&hdev->pdev->dev,
12353                                 "Get dfx reg fail, status is %d.\n", ret);
12354                         break;
12355                 }
12356
12357                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12358         }
12359
12360         kfree(desc_src);
12361 out:
12362         kfree(bd_num_list);
12363         return ret;
12364 }
12365
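/* Copy the directly readable PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) into 'data', padding each group up to a
 * separator boundary. Returns the number of u32 words written.
 */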
12366 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12367                               struct hnae3_knic_private_info *kinfo)
12368 {
12369 #define HCLGE_RING_REG_OFFSET           0x200
12370 #define HCLGE_RING_INT_REG_OFFSET       0x4
12371
12372         int i, j, reg_num, separator_num;
12373         int data_num_sum;
12374         u32 *reg = data;
12375
12376         /* fetch per-PF register values from the PF PCIe register space */
12377         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12378         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12379         for (i = 0; i < reg_num; i++)
12380                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12381         for (i = 0; i < separator_num; i++)
12382                 *reg++ = SEPARATOR_VALUE;
12383         data_num_sum = reg_num + separator_num;
12384
12385         reg_num = ARRAY_SIZE(common_reg_addr_list);
12386         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12387         for (i = 0; i < reg_num; i++)
12388                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12389         for (i = 0; i < separator_num; i++)
12390                 *reg++ = SEPARATOR_VALUE;
12391         data_num_sum += reg_num + separator_num;
12392
12393         reg_num = ARRAY_SIZE(ring_reg_addr_list);
12394         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12395         for (j = 0; j < kinfo->num_tqps; j++) {
12396                 for (i = 0; i < reg_num; i++)
12397                         *reg++ = hclge_read_dev(&hdev->hw,
12398                                                 ring_reg_addr_list[i] +
12399                                                 HCLGE_RING_REG_OFFSET * j);
12400                 for (i = 0; i < separator_num; i++)
12401                         *reg++ = SEPARATOR_VALUE;
12402         }
12403         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12404
12405         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12406         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12407         for (j = 0; j < hdev->num_msi_used - 1; j++) {
12408                 for (i = 0; i < reg_num; i++)
12409                         *reg++ = hclge_read_dev(&hdev->hw,
12410                                                 tqp_intr_reg_addr_list[i] +
12411                                                 HCLGE_RING_INT_REG_OFFSET * j);
12412                 for (i = 0; i < separator_num; i++)
12413                         *reg++ = SEPARATOR_VALUE;
12414         }
12415         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12416
12417         return data_num_sum;
12418 }
12419
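/* Compute the buffer size (in bytes) needed by hclge_get_regs(): direct PF
 * registers, the 32/64-bit firmware register dumps and the DFX registers,
 * each rounded up to whole separator-terminated lines.
 */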
12420 static int hclge_get_regs_len(struct hnae3_handle *handle)
12421 {
12422         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12423         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12424         struct hclge_vport *vport = hclge_get_vport(handle);
12425         struct hclge_dev *hdev = vport->back;
12426         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12427         int regs_lines_32_bit, regs_lines_64_bit;
12428         int ret;
12429
12430         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12431         if (ret) {
12432                 dev_err(&hdev->pdev->dev,
12433                         "Get register number failed, ret = %d.\n", ret);
12434                 return ret;
12435         }
12436
12437         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12438         if (ret) {
12439                 dev_err(&hdev->pdev->dev,
12440                         "Get dfx reg len failed, ret = %d.\n", ret);
12441                 return ret;
12442         }
12443
12444         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12445                 REG_SEPARATOR_LINE;
12446         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12447                 REG_SEPARATOR_LINE;
12448         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12449                 REG_SEPARATOR_LINE;
12450         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12451                 REG_SEPARATOR_LINE;
12452         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12453                 REG_SEPARATOR_LINE;
12454         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12455                 REG_SEPARATOR_LINE;
12456
12457         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12458                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12459                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12460 }
12461
12462 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12463                            void *data)
12464 {
12465         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12466         struct hclge_vport *vport = hclge_get_vport(handle);
12467         struct hclge_dev *hdev = vport->back;
12468         u32 regs_num_32_bit, regs_num_64_bit;
12469         int i, reg_num, separator_num, ret;
12470         u32 *reg = data;
12471
12472         *version = hdev->fw_version;
12473
12474         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12475         if (ret) {
12476                 dev_err(&hdev->pdev->dev,
12477                         "Get register number failed, ret = %d.\n", ret);
12478                 return;
12479         }
12480
12481         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12482
12483         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12484         if (ret) {
12485                 dev_err(&hdev->pdev->dev,
12486                         "Get 32 bit register failed, ret = %d.\n", ret);
12487                 return;
12488         }
12489         reg_num = regs_num_32_bit;
12490         reg += reg_num;
12491         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12492         for (i = 0; i < separator_num; i++)
12493                 *reg++ = SEPARATOR_VALUE;
12494
12495         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12496         if (ret) {
12497                 dev_err(&hdev->pdev->dev,
12498                         "Get 64 bit register failed, ret = %d.\n", ret);
12499                 return;
12500         }
12501         reg_num = regs_num_64_bit * 2;
12502         reg += reg_num;
12503         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12504         for (i = 0; i < separator_num; i++)
12505                 *reg++ = SEPARATOR_VALUE;
12506
12507         ret = hclge_get_dfx_reg(hdev, reg);
12508         if (ret)
12509                 dev_err(&hdev->pdev->dev,
12510                         "Get dfx register failed, ret = %d.\n", ret);
12511 }
12512
12513 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12514 {
12515         struct hclge_set_led_state_cmd *req;
12516         struct hclge_desc desc;
12517         int ret;
12518
12519         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12520
12521         req = (struct hclge_set_led_state_cmd *)desc.data;
12522         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12523                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12524
12525         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12526         if (ret)
12527                 dev_err(&hdev->pdev->dev,
12528                         "Send set led state cmd error, ret =%d\n", ret);
12529
12530         return ret;
12531 }
12532
12533 enum hclge_led_status {
12534         HCLGE_LED_OFF,
12535         HCLGE_LED_ON,
12536         HCLGE_LED_NO_CHANGE = 0xFF,
12537 };
12538
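/* ethtool set_phys_id handler: turn the locate LED on when identification
 * starts and back off when it stops.
 */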
12539 static int hclge_set_led_id(struct hnae3_handle *handle,
12540                             enum ethtool_phys_id_state status)
12541 {
12542         struct hclge_vport *vport = hclge_get_vport(handle);
12543         struct hclge_dev *hdev = vport->back;
12544
12545         switch (status) {
12546         case ETHTOOL_ID_ACTIVE:
12547                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12548         case ETHTOOL_ID_INACTIVE:
12549                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12550         default:
12551                 return -EINVAL;
12552         }
12553 }
12554
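/* Copy the MAC's supported and advertised link mode bitmaps to the caller. */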
12555 static void hclge_get_link_mode(struct hnae3_handle *handle,
12556                                 unsigned long *supported,
12557                                 unsigned long *advertising)
12558 {
12559         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12560         struct hclge_vport *vport = hclge_get_vport(handle);
12561         struct hclge_dev *hdev = vport->back;
12562         unsigned int idx = 0;
12563
12564         for (; idx < size; idx++) {
12565                 supported[idx] = hdev->hw.mac.supported[idx];
12566                 advertising[idx] = hdev->hw.mac.advertising[idx];
12567         }
12568 }
12569
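/* Enable or disable hardware GRO for this device. */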
12570 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12571 {
12572         struct hclge_vport *vport = hclge_get_vport(handle);
12573         struct hclge_dev *hdev = vport->back;
12574
12575         return hclge_config_gro(hdev, enable);
12576 }
12577
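/* Re-apply any pending promiscuous mode changes. Vport 0 (the PF) combines
 * its netdev flags with the overflow promiscuous flags; for the VF vports,
 * the requested unicast/multicast promiscuous state is honoured only when
 * the VF is trusted, while the broadcast request is always applied. A
 * successful update also marks the vport's VLAN filter state for re-sync.
 */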
12578 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12579 {
12580         struct hclge_vport *vport = &hdev->vport[0];
12581         struct hnae3_handle *handle = &vport->nic;
12582         u8 tmp_flags;
12583         int ret;
12584         u16 i;
12585
12586         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12587                 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12588                 vport->last_promisc_flags = vport->overflow_promisc_flags;
12589         }
12590
12591         if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12592                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12593                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12594                                              tmp_flags & HNAE3_MPE);
12595                 if (!ret) {
12596                         clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12597                                   &vport->state);
12598                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12599                                 &vport->state);
12600                 }
12601         }
12602
12603         for (i = 1; i < hdev->num_alloc_vport; i++) {
12604                 bool uc_en = false;
12605                 bool mc_en = false;
12606                 bool bc_en;
12607
12608                 vport = &hdev->vport[i];
12609
12610                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12611                                         &vport->state))
12612                         continue;
12613
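                /* only a trusted VF may enable unicast/multicast promiscuous
                 * mode; the broadcast request is honoured regardless
                 */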
12614                 if (vport->vf_info.trusted) {
12615                         uc_en = vport->vf_info.request_uc_en > 0;
12616                         mc_en = vport->vf_info.request_mc_en > 0;
12617                 }
12618                 bc_en = vport->vf_info.request_bc_en > 0;
12619
12620                 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12621                                                  mc_en, bc_en);
12622                 if (ret) {
12623                         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12624                                 &vport->state);
12625                         return;
12626                 }
12627                 hclge_set_vport_vlan_fltr_change(vport);
12628         }
12629 }
12630
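/* Query the firmware for SFP module presence; returns true when a module is
 * plugged in.
 */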
12631 static bool hclge_module_existed(struct hclge_dev *hdev)
12632 {
12633         struct hclge_desc desc;
12634         u32 existed;
12635         int ret;
12636
12637         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12638         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12639         if (ret) {
12640                 dev_err(&hdev->pdev->dev,
12641                         "failed to get SFP exist state, ret = %d\n", ret);
12642                 return false;
12643         }
12644
12645         existed = le32_to_cpu(desc.data[0]);
12646
12647         return existed != 0;
12648 }
12649
12650 /* Reading the module EEPROM needs 6 BDs (140 bytes in total) per command.
12651  * Return the number of bytes actually read; 0 means the read failed.
12652  */
12653 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12654                                      u32 len, u8 *data)
12655 {
12656         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12657         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12658         u16 read_len;
12659         u16 copy_len;
12660         int ret;
12661         int i;
12662
12663         /* setup all 6 bds to read module eeprom info. */
12664         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12665                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12666                                            true);
12667
12668                 /* bd0~bd4 need next flag */
12669                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12670                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12671         }
12672
12673         /* set up bd0; this bd carries the offset and read length. */
12674         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12675         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12676         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12677         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12678
12679         ret = hclge_cmd_send(&hdev->hw, desc, i);
12680         if (ret) {
12681                 dev_err(&hdev->pdev->dev,
12682                         "failed to get SFP eeprom info, ret = %d\n", ret);
12683                 return 0;
12684         }
12685
12686         /* copy sfp info from bd0 to out buffer. */
12687         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12688         memcpy(data, sfp_info_bd0->data, copy_len);
12689         read_len = copy_len;
12690
12691         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12692         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12693                 if (read_len >= len)
12694                         return read_len;
12695
12696                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12697                 memcpy(data + read_len, desc[i].data, copy_len);
12698                 read_len += copy_len;
12699         }
12700
12701         return read_len;
12702 }
12703
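/* ethtool module EEPROM read: only supported on fibre ports with a module
 * present; reads in chunks of up to HCLGE_SFP_INFO_MAX_LEN bytes until the
 * requested length has been returned.
 */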
12704 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12705                                    u32 len, u8 *data)
12706 {
12707         struct hclge_vport *vport = hclge_get_vport(handle);
12708         struct hclge_dev *hdev = vport->back;
12709         u32 read_len = 0;
12710         u16 data_len;
12711
12712         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12713                 return -EOPNOTSUPP;
12714
12715         if (!hclge_module_existed(hdev))
12716                 return -ENXIO;
12717
12718         while (read_len < len) {
12719                 data_len = hclge_get_sfp_eeprom_info(hdev,
12720                                                      offset + read_len,
12721                                                      len - read_len,
12722                                                      data + read_len);
12723                 if (!data_len)
12724                         return -EIO;
12725
12726                 read_len += data_len;
12727         }
12728
12729         return 0;
12730 }
12731
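/* PF implementation of the hnae3 ae_ops interface; the hns3 common layer
 * drives device setup, data path configuration and ethtool support through
 * these callbacks.
 */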
12732 static const struct hnae3_ae_ops hclge_ops = {
12733         .init_ae_dev = hclge_init_ae_dev,
12734         .uninit_ae_dev = hclge_uninit_ae_dev,
12735         .reset_prepare = hclge_reset_prepare_general,
12736         .reset_done = hclge_reset_done,
12737         .init_client_instance = hclge_init_client_instance,
12738         .uninit_client_instance = hclge_uninit_client_instance,
12739         .map_ring_to_vector = hclge_map_ring_to_vector,
12740         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12741         .get_vector = hclge_get_vector,
12742         .put_vector = hclge_put_vector,
12743         .set_promisc_mode = hclge_set_promisc_mode,
12744         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12745         .set_loopback = hclge_set_loopback,
12746         .start = hclge_ae_start,
12747         .stop = hclge_ae_stop,
12748         .client_start = hclge_client_start,
12749         .client_stop = hclge_client_stop,
12750         .get_status = hclge_get_status,
12751         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12752         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12753         .get_media_type = hclge_get_media_type,
12754         .check_port_speed = hclge_check_port_speed,
12755         .get_fec = hclge_get_fec,
12756         .set_fec = hclge_set_fec,
12757         .get_rss_key_size = hclge_get_rss_key_size,
12758         .get_rss = hclge_get_rss,
12759         .set_rss = hclge_set_rss,
12760         .set_rss_tuple = hclge_set_rss_tuple,
12761         .get_rss_tuple = hclge_get_rss_tuple,
12762         .get_tc_size = hclge_get_tc_size,
12763         .get_mac_addr = hclge_get_mac_addr,
12764         .set_mac_addr = hclge_set_mac_addr,
12765         .do_ioctl = hclge_do_ioctl,
12766         .add_uc_addr = hclge_add_uc_addr,
12767         .rm_uc_addr = hclge_rm_uc_addr,
12768         .add_mc_addr = hclge_add_mc_addr,
12769         .rm_mc_addr = hclge_rm_mc_addr,
12770         .set_autoneg = hclge_set_autoneg,
12771         .get_autoneg = hclge_get_autoneg,
12772         .restart_autoneg = hclge_restart_autoneg,
12773         .halt_autoneg = hclge_halt_autoneg,
12774         .get_pauseparam = hclge_get_pauseparam,
12775         .set_pauseparam = hclge_set_pauseparam,
12776         .set_mtu = hclge_set_mtu,
12777         .reset_queue = hclge_reset_tqp,
12778         .get_stats = hclge_get_stats,
12779         .get_mac_stats = hclge_get_mac_stat,
12780         .update_stats = hclge_update_stats,
12781         .get_strings = hclge_get_strings,
12782         .get_sset_count = hclge_get_sset_count,
12783         .get_fw_version = hclge_get_fw_version,
12784         .get_mdix_mode = hclge_get_mdix_mode,
12785         .enable_vlan_filter = hclge_enable_vlan_filter,
12786         .set_vlan_filter = hclge_set_vlan_filter,
12787         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12788         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12789         .reset_event = hclge_reset_event,
12790         .get_reset_level = hclge_get_reset_level,
12791         .set_default_reset_request = hclge_set_def_reset_request,
12792         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12793         .set_channels = hclge_set_channels,
12794         .get_channels = hclge_get_channels,
12795         .get_regs_len = hclge_get_regs_len,
12796         .get_regs = hclge_get_regs,
12797         .set_led_id = hclge_set_led_id,
12798         .get_link_mode = hclge_get_link_mode,
12799         .add_fd_entry = hclge_add_fd_entry,
12800         .del_fd_entry = hclge_del_fd_entry,
12801         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12802         .get_fd_rule_info = hclge_get_fd_rule_info,
12803         .get_fd_all_rules = hclge_get_all_rules,
12804         .enable_fd = hclge_enable_fd,
12805         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12806         .dbg_read_cmd = hclge_dbg_read_cmd,
12807         .handle_hw_ras_error = hclge_handle_hw_ras_error,
12808         .get_hw_reset_stat = hclge_get_hw_reset_stat,
12809         .ae_dev_resetting = hclge_ae_dev_resetting,
12810         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12811         .set_gro_en = hclge_gro_en,
12812         .get_global_queue_id = hclge_covert_handle_qid_global,
12813         .set_timer_task = hclge_set_timer_task,
12814         .mac_connect_phy = hclge_mac_connect_phy,
12815         .mac_disconnect_phy = hclge_mac_disconnect_phy,
12816         .get_vf_config = hclge_get_vf_config,
12817         .set_vf_link_state = hclge_set_vf_link_state,
12818         .set_vf_spoofchk = hclge_set_vf_spoofchk,
12819         .set_vf_trust = hclge_set_vf_trust,
12820         .set_vf_rate = hclge_set_vf_rate,
12821         .set_vf_mac = hclge_set_vf_mac,
12822         .get_module_eeprom = hclge_get_module_eeprom,
12823         .get_cmdq_stat = hclge_get_cmdq_stat,
12824         .add_cls_flower = hclge_add_cls_flower,
12825         .del_cls_flower = hclge_del_cls_flower,
12826         .cls_flower_active = hclge_is_cls_flower_active,
12827         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12828         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12829 };
12830
12831 static struct hnae3_ae_algo ae_algo = {
12832         .ops = &hclge_ops,
12833         .pdev_id_table = ae_algo_pci_tbl,
12834 };
12835
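/* Module entry point: create the driver workqueue and register the PF
 * algorithm with the hnae3 framework so matching devices can be probed.
 */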
12836 static int hclge_init(void)
12837 {
12838         pr_info("%s is initializing\n", HCLGE_NAME);
12839
12840         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12841         if (!hclge_wq) {
12842                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12843                 return -ENOMEM;
12844         }
12845
12846         hnae3_register_ae_algo(&ae_algo);
12847
12848         return 0;
12849 }
12850
12851 static void hclge_exit(void)
12852 {
12853         hnae3_unregister_ae_algo(&ae_algo);
12854         destroy_workqueue(hclge_wq);
12855 }
12856 module_init(hclge_init);
12857 module_exit(hclge_exit);
12858
12859 MODULE_LICENSE("GPL");
12860 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12861 MODULE_DESCRIPTION("HCLGE Driver");
12862 MODULE_VERSION(HCLGE_MOD_VERSION);