drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (linux-2.6-microblaze.git, commit bc805d5fb16e6c1fcee72ca8b834792cfb7bf8d0)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26
27 #define HCLGE_NAME                      "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
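/* HCLGE_STATS_READ reads a u64 counter at a byte offset within a stats
 * struct; together with HCLGE_MAC_STATS_FIELD_OFF it lets the stats tables
 * below address fields of struct hclge_mac_stats by name.
 */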
30
31 #define HCLGE_BUF_SIZE_UNIT     256U
32 #define HCLGE_BUF_MUL_BY        2
33 #define HCLGE_BUF_DIV_BY        2
34 #define NEED_RESERVE_TC_NUM     2
35 #define BUF_MAX_PERCENT         100
36 #define BUF_RESERVE_PERCENT     90
37
38 #define HCLGE_RESET_MAX_FAIL_CNT        5
39 #define HCLGE_RESET_SYNC_TIME           100
40 #define HCLGE_PF_RESET_SYNC_TIME        20
41 #define HCLGE_PF_RESET_SYNC_CNT         1500
42
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56
57 #define HCLGE_LINK_STATUS_MS    10
58
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67                                                    unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88         /* required last entry */
89         {0, }
90 };
91
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95                                          HCLGE_CMDQ_TX_ADDR_H_REG,
96                                          HCLGE_CMDQ_TX_DEPTH_REG,
97                                          HCLGE_CMDQ_TX_TAIL_REG,
98                                          HCLGE_CMDQ_TX_HEAD_REG,
99                                          HCLGE_CMDQ_RX_ADDR_L_REG,
100                                          HCLGE_CMDQ_RX_ADDR_H_REG,
101                                          HCLGE_CMDQ_RX_DEPTH_REG,
102                                          HCLGE_CMDQ_RX_TAIL_REG,
103                                          HCLGE_CMDQ_RX_HEAD_REG,
104                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
105                                          HCLGE_CMDQ_INTR_STS_REG,
106                                          HCLGE_CMDQ_INTR_EN_REG,
107                                          HCLGE_CMDQ_INTR_GEN_REG};
108
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110                                            HCLGE_VECTOR0_OTER_EN_REG,
111                                            HCLGE_MISC_RESET_STS_REG,
112                                            HCLGE_MISC_VECTOR_INT_STS,
113                                            HCLGE_GLOBAL_RESET_REG,
114                                            HCLGE_FUN_RST_ING,
115                                            HCLGE_GRO_EN_REG};
116
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118                                          HCLGE_RING_RX_ADDR_H_REG,
119                                          HCLGE_RING_RX_BD_NUM_REG,
120                                          HCLGE_RING_RX_BD_LENGTH_REG,
121                                          HCLGE_RING_RX_MERGE_EN_REG,
122                                          HCLGE_RING_RX_TAIL_REG,
123                                          HCLGE_RING_RX_HEAD_REG,
124                                          HCLGE_RING_RX_FBD_NUM_REG,
125                                          HCLGE_RING_RX_OFFSET_REG,
126                                          HCLGE_RING_RX_FBD_OFFSET_REG,
127                                          HCLGE_RING_RX_STASH_REG,
128                                          HCLGE_RING_RX_BD_ERR_REG,
129                                          HCLGE_RING_TX_ADDR_L_REG,
130                                          HCLGE_RING_TX_ADDR_H_REG,
131                                          HCLGE_RING_TX_BD_NUM_REG,
132                                          HCLGE_RING_TX_PRIORITY_REG,
133                                          HCLGE_RING_TX_TC_REG,
134                                          HCLGE_RING_TX_MERGE_EN_REG,
135                                          HCLGE_RING_TX_TAIL_REG,
136                                          HCLGE_RING_TX_HEAD_REG,
137                                          HCLGE_RING_TX_FBD_NUM_REG,
138                                          HCLGE_RING_TX_OFFSET_REG,
139                                          HCLGE_RING_TX_EBD_NUM_REG,
140                                          HCLGE_RING_TX_EBD_OFFSET_REG,
141                                          HCLGE_RING_TX_BD_ERR_REG,
142                                          HCLGE_RING_EN_REG};
143
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145                                              HCLGE_TQP_INTR_GL0_REG,
146                                              HCLGE_TQP_INTR_GL1_REG,
147                                              HCLGE_TQP_INTR_GL2_REG,
148                                              HCLGE_TQP_INTR_RL_REG};
149
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151         "App    Loopback test",
152         "Serdes serial Loopback test",
153         "Serdes parallel Loopback test",
154         "Phy    Loopback test"
155 };
156
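/* ethtool stat names paired with their byte offsets inside
 * struct hclge_mac_stats
 */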
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158         {"mac_tx_mac_pause_num",
159                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160         {"mac_rx_mac_pause_num",
161                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162         {"mac_tx_control_pkt_num",
163                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164         {"mac_rx_control_pkt_num",
165                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166         {"mac_tx_pfc_pkt_num",
167                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168         {"mac_tx_pfc_pri0_pkt_num",
169                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170         {"mac_tx_pfc_pri1_pkt_num",
171                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172         {"mac_tx_pfc_pri2_pkt_num",
173                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174         {"mac_tx_pfc_pri3_pkt_num",
175                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176         {"mac_tx_pfc_pri4_pkt_num",
177                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178         {"mac_tx_pfc_pri5_pkt_num",
179                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180         {"mac_tx_pfc_pri6_pkt_num",
181                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182         {"mac_tx_pfc_pri7_pkt_num",
183                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184         {"mac_rx_pfc_pkt_num",
185                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186         {"mac_rx_pfc_pri0_pkt_num",
187                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188         {"mac_rx_pfc_pri1_pkt_num",
189                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190         {"mac_rx_pfc_pri2_pkt_num",
191                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192         {"mac_rx_pfc_pri3_pkt_num",
193                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194         {"mac_rx_pfc_pri4_pkt_num",
195                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196         {"mac_rx_pfc_pri5_pkt_num",
197                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198         {"mac_rx_pfc_pri6_pkt_num",
199                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200         {"mac_rx_pfc_pri7_pkt_num",
201                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202         {"mac_tx_total_pkt_num",
203                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204         {"mac_tx_total_oct_num",
205                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206         {"mac_tx_good_pkt_num",
207                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208         {"mac_tx_bad_pkt_num",
209                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210         {"mac_tx_good_oct_num",
211                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212         {"mac_tx_bad_oct_num",
213                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214         {"mac_tx_uni_pkt_num",
215                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216         {"mac_tx_multi_pkt_num",
217                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218         {"mac_tx_broad_pkt_num",
219                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220         {"mac_tx_undersize_pkt_num",
221                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222         {"mac_tx_oversize_pkt_num",
223                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224         {"mac_tx_64_oct_pkt_num",
225                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226         {"mac_tx_65_127_oct_pkt_num",
227                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228         {"mac_tx_128_255_oct_pkt_num",
229                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230         {"mac_tx_256_511_oct_pkt_num",
231                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232         {"mac_tx_512_1023_oct_pkt_num",
233                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234         {"mac_tx_1024_1518_oct_pkt_num",
235                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236         {"mac_tx_1519_2047_oct_pkt_num",
237                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238         {"mac_tx_2048_4095_oct_pkt_num",
239                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240         {"mac_tx_4096_8191_oct_pkt_num",
241                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242         {"mac_tx_8192_9216_oct_pkt_num",
243                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244         {"mac_tx_9217_12287_oct_pkt_num",
245                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246         {"mac_tx_12288_16383_oct_pkt_num",
247                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248         {"mac_tx_1519_max_good_pkt_num",
249                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250         {"mac_tx_1519_max_bad_pkt_num",
251                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252         {"mac_rx_total_pkt_num",
253                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254         {"mac_rx_total_oct_num",
255                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256         {"mac_rx_good_pkt_num",
257                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258         {"mac_rx_bad_pkt_num",
259                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260         {"mac_rx_good_oct_num",
261                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262         {"mac_rx_bad_oct_num",
263                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264         {"mac_rx_uni_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266         {"mac_rx_multi_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268         {"mac_rx_broad_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270         {"mac_rx_undersize_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272         {"mac_rx_oversize_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274         {"mac_rx_64_oct_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276         {"mac_rx_65_127_oct_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278         {"mac_rx_128_255_oct_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280         {"mac_rx_256_511_oct_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282         {"mac_rx_512_1023_oct_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284         {"mac_rx_1024_1518_oct_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286         {"mac_rx_1519_2047_oct_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288         {"mac_rx_2048_4095_oct_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290         {"mac_rx_4096_8191_oct_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292         {"mac_rx_8192_9216_oct_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294         {"mac_rx_9217_12287_oct_pkt_num",
295                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296         {"mac_rx_12288_16383_oct_pkt_num",
297                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298         {"mac_rx_1519_max_good_pkt_num",
299                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300         {"mac_rx_1519_max_bad_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302
303         {"mac_tx_fragment_pkt_num",
304                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305         {"mac_tx_undermin_pkt_num",
306                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307         {"mac_tx_jabber_pkt_num",
308                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309         {"mac_tx_err_all_pkt_num",
310                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311         {"mac_tx_from_app_good_pkt_num",
312                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313         {"mac_tx_from_app_bad_pkt_num",
314                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315         {"mac_rx_fragment_pkt_num",
316                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317         {"mac_rx_undermin_pkt_num",
318                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319         {"mac_rx_jabber_pkt_num",
320                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321         {"mac_rx_fcs_err_pkt_num",
322                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323         {"mac_rx_send_app_good_pkt_num",
324                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325         {"mac_rx_send_app_bad_pkt_num",
326                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328
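/* default MAC manager table entry: matches LLDP frames
 * (ethertype ETH_P_LLDP, dst MAC 01:80:c2:00:00:0e)
 */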
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330         {
331                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334                 .i_port_bitmap = 0x1,
335         },
336 };
337
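/* default RSS hash key; this byte sequence matches the widely used
 * default Toeplitz key from the Microsoft RSS specification
 */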
338 static const u8 hclge_hash_key[] = {
339         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345
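/* the BD-number offsets below and the DFX query opcodes that follow are
 * kept in the same order, so entry i of one array corresponds to entry i
 * of the other
 */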
346 static const u32 hclge_dfx_bd_offset_list[] = {
347         HCLGE_DFX_BIOS_BD_OFFSET,
348         HCLGE_DFX_SSU_0_BD_OFFSET,
349         HCLGE_DFX_SSU_1_BD_OFFSET,
350         HCLGE_DFX_IGU_BD_OFFSET,
351         HCLGE_DFX_RPU_0_BD_OFFSET,
352         HCLGE_DFX_RPU_1_BD_OFFSET,
353         HCLGE_DFX_NCSI_BD_OFFSET,
354         HCLGE_DFX_RTC_BD_OFFSET,
355         HCLGE_DFX_PPP_BD_OFFSET,
356         HCLGE_DFX_RCB_BD_OFFSET,
357         HCLGE_DFX_TQP_BD_OFFSET,
358         HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362         HCLGE_OPC_DFX_BIOS_COMMON_REG,
363         HCLGE_OPC_DFX_SSU_REG_0,
364         HCLGE_OPC_DFX_SSU_REG_1,
365         HCLGE_OPC_DFX_IGU_EGU_REG,
366         HCLGE_OPC_DFX_RPU_REG_0,
367         HCLGE_OPC_DFX_RPU_REG_1,
368         HCLGE_OPC_DFX_NCSI_REG,
369         HCLGE_OPC_DFX_RTC_REG,
370         HCLGE_OPC_DFX_PPP_REG,
371         HCLGE_OPC_DFX_RCB_REG,
372         HCLGE_OPC_DFX_TQP_REG,
373         HCLGE_OPC_DFX_SSU_REG_2
374 };
375
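/* flow director meta data fields and their widths in bits */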
376 static const struct key_info meta_data_key_info[] = {
377         { PACKET_TYPE_ID, 6},
378         { IP_FRAGEMENT, 1},
379         { ROCE_TYPE, 1},
380         { NEXT_KEY, 5},
381         { VLAN_NUMBER, 2},
382         { SRC_VPORT, 12},
383         { DST_VPORT, 12},
384         { TUNNEL_PACKET, 1},
385 };
386
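/* flow director tuple fields: each entry holds the tuple id, its key
 * length in bits, the key encode option, and the offsets of the value and
 * mask inside struct hclge_fd_rule (-1 for tuples not filled from a rule)
 */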
387 static const struct key_info tuple_key_info[] = {
388         { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389         { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390         { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391         { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392         { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393         { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394         { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395         { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396         { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397         { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398         { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399         { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400         { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401         { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402         { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403         { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404         { INNER_DST_MAC, 48, KEY_OPT_MAC,
405           offsetof(struct hclge_fd_rule, tuples.dst_mac),
406           offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407         { INNER_SRC_MAC, 48, KEY_OPT_MAC,
408           offsetof(struct hclge_fd_rule, tuples.src_mac),
409           offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410         { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411           offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412           offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413         { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414         { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415           offsetof(struct hclge_fd_rule, tuples.ether_proto),
416           offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417         { INNER_L2_RSV, 16, KEY_OPT_LE16,
418           offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419           offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420         { INNER_IP_TOS, 8, KEY_OPT_U8,
421           offsetof(struct hclge_fd_rule, tuples.ip_tos),
422           offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423         { INNER_IP_PROTO, 8, KEY_OPT_U8,
424           offsetof(struct hclge_fd_rule, tuples.ip_proto),
425           offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426         { INNER_SRC_IP, 32, KEY_OPT_IP,
427           offsetof(struct hclge_fd_rule, tuples.src_ip),
428           offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429         { INNER_DST_IP, 32, KEY_OPT_IP,
430           offsetof(struct hclge_fd_rule, tuples.dst_ip),
431           offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432         { INNER_L3_RSV, 16, KEY_OPT_LE16,
433           offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434           offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435         { INNER_SRC_PORT, 16, KEY_OPT_LE16,
436           offsetof(struct hclge_fd_rule, tuples.src_port),
437           offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438         { INNER_DST_PORT, 16, KEY_OPT_LE16,
439           offsetof(struct hclge_fd_rule, tuples.dst_port),
440           offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441         { INNER_L4_RSV, 32, KEY_OPT_LE32,
442           offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443           offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449
450         u64 *data = (u64 *)(&hdev->mac_stats);
451         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452         __le64 *desc_data;
453         int i, k, n;
454         int ret;
455
456         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458         if (ret) {
459                 dev_err(&hdev->pdev->dev,
460                         "Get MAC pkt stats fail, status = %d.\n", ret);
461
462                 return ret;
463         }
464
465         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466                 /* for special opcode 0032, only the first desc has the head */
467                 if (unlikely(i == 0)) {
468                         desc_data = (__le64 *)(&desc[i].data[0]);
469                         n = HCLGE_RD_FIRST_STATS_NUM;
470                 } else {
471                         desc_data = (__le64 *)(&desc[i]);
472                         n = HCLGE_RD_OTHER_STATS_NUM;
473                 }
474
475                 for (k = 0; k < n; k++) {
476                         *data += le64_to_cpu(*desc_data);
477                         data++;
478                         desc_data++;
479                 }
480         }
481
482         return 0;
483 }
484
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487         u64 *data = (u64 *)(&hdev->mac_stats);
488         struct hclge_desc *desc;
489         __le64 *desc_data;
490         u16 i, k, n;
491         int ret;
492
493         /* This may be called inside atomic sections,
494          * so GFP_ATOMIC is more suitable here
495          */
496         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497         if (!desc)
498                 return -ENOMEM;
499
500         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502         if (ret) {
503                 kfree(desc);
504                 return ret;
505         }
506
507         for (i = 0; i < desc_num; i++) {
508                 /* for special opcode 0034, only the first desc has the head */
509                 if (i == 0) {
510                         desc_data = (__le64 *)(&desc[i].data[0]);
511                         n = HCLGE_RD_FIRST_STATS_NUM;
512                 } else {
513                         desc_data = (__le64 *)(&desc[i]);
514                         n = HCLGE_RD_OTHER_STATS_NUM;
515                 }
516
517                 for (k = 0; k < n; k++) {
518                         *data += le64_to_cpu(*desc_data);
519                         data++;
520                         desc_data++;
521                 }
522         }
523
524         kfree(desc);
525
526         return 0;
527 }
528
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531         struct hclge_desc desc;
532         __le32 *desc_data;
533         u32 reg_num;
534         int ret;
535
536         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538         if (ret)
539                 return ret;
540
541         desc_data = (__le32 *)(&desc.data[0]);
542         reg_num = le32_to_cpu(*desc_data);
543
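        /* the expression below works out to 1 + DIV_ROUND_UP(reg_num - 3, 4):
         * presumably the first descriptor carries the head plus three
         * registers and each following descriptor carries four
         */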
544         *desc_num = 1 + ((reg_num - 3) >> 2) +
545                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546
547         return 0;
548 }
549
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552         u32 desc_num;
553         int ret;
554
555         ret = hclge_mac_query_reg_num(hdev, &desc_num);
556         /* The firmware supports the new statistics acquisition method */
557         if (!ret)
558                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
559         else if (ret == -EOPNOTSUPP)
560                 ret = hclge_mac_update_stats_defective(hdev);
561         else
562                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563
564         return ret;
565 }
566
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570         struct hclge_vport *vport = hclge_get_vport(handle);
571         struct hclge_dev *hdev = vport->back;
572         struct hnae3_queue *queue;
573         struct hclge_desc desc[1];
574         struct hclge_tqp *tqp;
575         int ret, i;
576
577         for (i = 0; i < kinfo->num_tqps; i++) {
578                 queue = handle->kinfo.tqp[i];
579                 tqp = container_of(queue, struct hclge_tqp, q);
580                 /* command : HCLGE_OPC_QUERY_RX_STATS */
581                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582                                            true);
583
584                 desc[0].data[0] = cpu_to_le32(tqp->index);
585                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
586                 if (ret) {
587                         dev_err(&hdev->pdev->dev,
588                                 "Query tqp stat fail, status = %d, queue = %d\n",
589                                 ret, i);
590                         return ret;
591                 }
592                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593                         le32_to_cpu(desc[0].data[1]);
594         }
595
596         for (i = 0; i < kinfo->num_tqps; i++) {
597                 queue = handle->kinfo.tqp[i];
598                 tqp = container_of(queue, struct hclge_tqp, q);
599                 /* command : HCLGE_OPC_QUERY_TX_STATS */
600                 hclge_cmd_setup_basic_desc(&desc[0],
601                                            HCLGE_OPC_QUERY_TX_STATS,
602                                            true);
603
604                 desc[0].data[0] = cpu_to_le32(tqp->index);
605                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
606                 if (ret) {
607                         dev_err(&hdev->pdev->dev,
608                                 "Query tqp stat fail, status = %d, queue = %d\n",
609                                 ret, i);
610                         return ret;
611                 }
612                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613                         le32_to_cpu(desc[0].data[1]);
614         }
615
616         return 0;
617 }
618
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         struct hclge_tqp *tqp;
623         u64 *buff = data;
624         int i;
625
626         for (i = 0; i < kinfo->num_tqps; i++) {
627                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629         }
630
631         for (i = 0; i < kinfo->num_tqps; i++) {
632                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634         }
635
636         return buff;
637 }
638
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642
643         /* each tqp has both a TX and an RX queue */
644         return kinfo->num_tqps * (2);
645 }
646
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650         u8 *buff = data;
651         int i;
652
653         for (i = 0; i < kinfo->num_tqps; i++) {
654                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655                         struct hclge_tqp, q);
656                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657                          tqp->index);
658                 buff = buff + ETH_GSTRING_LEN;
659         }
660
661         for (i = 0; i < kinfo->num_tqps; i++) {
662                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663                         struct hclge_tqp, q);
664                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665                          tqp->index);
666                 buff = buff + ETH_GSTRING_LEN;
667         }
668
669         return buff;
670 }
671
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673                                  const struct hclge_comm_stats_str strs[],
674                                  int size, u64 *data)
675 {
676         u64 *buf = data;
677         u32 i;
678
679         for (i = 0; i < size; i++)
680                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681
682         return buf + size;
683 }
684
685 static u8 *hclge_comm_get_strings(u32 stringset,
686                                   const struct hclge_comm_stats_str strs[],
687                                   int size, u8 *data)
688 {
689         char *buff = (char *)data;
690         u32 i;
691
692         if (stringset != ETH_SS_STATS)
693                 return buff;
694
695         for (i = 0; i < size; i++) {
696                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697                 buff = buff + ETH_GSTRING_LEN;
698         }
699
700         return (u8 *)buff;
701 }
702
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705         struct hnae3_handle *handle;
706         int status;
707
708         handle = &hdev->vport[0].nic;
709         if (handle->client) {
710                 status = hclge_tqps_update_stats(handle);
711                 if (status) {
712                         dev_err(&hdev->pdev->dev,
713                                 "Update TQPS stats fail, status = %d.\n",
714                                 status);
715                 }
716         }
717
718         status = hclge_mac_update_stats(hdev);
719         if (status)
720                 dev_err(&hdev->pdev->dev,
721                         "Update MAC stats fail, status = %d.\n", status);
722 }
723
724 static void hclge_update_stats(struct hnae3_handle *handle,
725                                struct net_device_stats *net_stats)
726 {
727         struct hclge_vport *vport = hclge_get_vport(handle);
728         struct hclge_dev *hdev = vport->back;
729         int status;
730
731         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732                 return;
733
734         status = hclge_mac_update_stats(hdev);
735         if (status)
736                 dev_err(&hdev->pdev->dev,
737                         "Update MAC stats fail, status = %d.\n",
738                         status);
739
740         status = hclge_tqps_update_stats(handle);
741         if (status)
742                 dev_err(&hdev->pdev->dev,
743                         "Update TQPS stats fail, status = %d.\n",
744                         status);
745
746         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752                 HNAE3_SUPPORT_PHY_LOOPBACK |\
753                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755
756         struct hclge_vport *vport = hclge_get_vport(handle);
757         struct hclge_dev *hdev = vport->back;
758         int count = 0;
759
760         /* Loopback test support rules:
761          * mac: only GE mode is supported
762          * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
763          * phy: only supported when a phy device exists on the board
764          */
765         if (stringset == ETH_SS_TEST) {
766                 /* clear loopback bit flags at first */
767                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772                         count += 1;
773                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774                 }
775
776                 count += 2;
777                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779
780                 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781                      hdev->hw.mac.phydev->drv->set_loopback) ||
782                     hnae3_dev_phy_imp_supported(hdev)) {
783                         count += 1;
784                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785                 }
786         } else if (stringset == ETH_SS_STATS) {
787                 count = ARRAY_SIZE(g_mac_stats_string) +
788                         hclge_tqps_get_sset_count(handle, stringset);
789         }
790
791         return count;
792 }
793
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795                               u8 *data)
796 {
797         u8 *p = (char *)data;
798         int size;
799
800         if (stringset == ETH_SS_STATS) {
801                 size = ARRAY_SIZE(g_mac_stats_string);
802                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803                                            size, p);
804                 p = hclge_tqps_get_strings(handle, p);
805         } else if (stringset == ETH_SS_TEST) {
806                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808                                ETH_GSTRING_LEN);
809                         p += ETH_GSTRING_LEN;
810                 }
811                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813                                ETH_GSTRING_LEN);
814                         p += ETH_GSTRING_LEN;
815                 }
816                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817                         memcpy(p,
818                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819                                ETH_GSTRING_LEN);
820                         p += ETH_GSTRING_LEN;
821                 }
822                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824                                ETH_GSTRING_LEN);
825                         p += ETH_GSTRING_LEN;
826                 }
827         }
828 }
829
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832         struct hclge_vport *vport = hclge_get_vport(handle);
833         struct hclge_dev *hdev = vport->back;
834         u64 *p;
835
836         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837                                  ARRAY_SIZE(g_mac_stats_string), data);
838         p = hclge_tqps_get_stats(handle, p);
839 }
840
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842                                struct hns3_mac_stats *mac_stats)
843 {
844         struct hclge_vport *vport = hclge_get_vport(handle);
845         struct hclge_dev *hdev = vport->back;
846
847         hclge_update_stats(handle, NULL);
848
849         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854                                    struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK       0xF
857
858         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859                 return -EINVAL;
860
861         /* Set the pf to main pf */
862         if (status->pf_state & HCLGE_PF_STATE_MAIN)
863                 hdev->flag |= HCLGE_FLAG_MAIN;
864         else
865                 hdev->flag &= ~HCLGE_FLAG_MAIN;
866
867         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868         return 0;
869 }
870
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT     5
874
875         struct hclge_func_status_cmd *req;
876         struct hclge_desc desc;
877         int timeout = 0;
878         int ret;
879
880         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881         req = (struct hclge_func_status_cmd *)desc.data;
882
883         do {
884                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885                 if (ret) {
886                         dev_err(&hdev->pdev->dev,
887                                 "query function status failed %d.\n", ret);
888                         return ret;
889                 }
890
891                 /* Check whether pf reset is done */
892                 if (req->pf_state)
893                         break;
894                 usleep_range(1000, 2000);
895         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
896
897         return hclge_parse_func_status(hdev, req);
898 }
899
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902         struct hclge_pf_res_cmd *req;
903         struct hclge_desc desc;
904         int ret;
905
906         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908         if (ret) {
909                 dev_err(&hdev->pdev->dev,
910                         "query pf resource failed %d.\n", ret);
911                 return ret;
912         }
913
914         req = (struct hclge_pf_res_cmd *)desc.data;
915         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916                          le16_to_cpu(req->ext_tqp_num);
917         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918
919         if (req->tx_buf_size)
920                 hdev->tx_buf_size =
921                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922         else
923                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924
925         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926
927         if (req->dv_buf_size)
928                 hdev->dv_buf_size =
929                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930         else
931                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932
933         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934
935         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937                 dev_err(&hdev->pdev->dev,
938                         "only %u msi resources available, not enough for pf(min:2).\n",
939                         hdev->num_nic_msi);
940                 return -EINVAL;
941         }
942
943         if (hnae3_dev_roce_supported(hdev)) {
944                 hdev->num_roce_msi =
945                         le16_to_cpu(req->pf_intr_vector_number_roce);
946
947                 /* PF should have both NIC and RoCE vectors;
948                  * NIC vectors are queued before RoCE vectors.
949                  */
950                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951         } else {
952                 hdev->num_msi = hdev->num_nic_msi;
953         }
954
955         return 0;
956 }
957
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
960         switch (speed_cmd) {
961         case 6:
962                 *speed = HCLGE_MAC_SPEED_10M;
963                 break;
964         case 7:
965                 *speed = HCLGE_MAC_SPEED_100M;
966                 break;
967         case 0:
968                 *speed = HCLGE_MAC_SPEED_1G;
969                 break;
970         case 1:
971                 *speed = HCLGE_MAC_SPEED_10G;
972                 break;
973         case 2:
974                 *speed = HCLGE_MAC_SPEED_25G;
975                 break;
976         case 3:
977                 *speed = HCLGE_MAC_SPEED_40G;
978                 break;
979         case 4:
980                 *speed = HCLGE_MAC_SPEED_50G;
981                 break;
982         case 5:
983                 *speed = HCLGE_MAC_SPEED_100G;
984                 break;
985         case 8:
986                 *speed = HCLGE_MAC_SPEED_200G;
987                 break;
988         default:
989                 return -EINVAL;
990         }
991
992         return 0;
993 }
994
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997         struct hclge_vport *vport = hclge_get_vport(handle);
998         struct hclge_dev *hdev = vport->back;
999         u32 speed_ability = hdev->hw.mac.speed_ability;
1000         u32 speed_bit = 0;
1001
1002         switch (speed) {
1003         case HCLGE_MAC_SPEED_10M:
1004                 speed_bit = HCLGE_SUPPORT_10M_BIT;
1005                 break;
1006         case HCLGE_MAC_SPEED_100M:
1007                 speed_bit = HCLGE_SUPPORT_100M_BIT;
1008                 break;
1009         case HCLGE_MAC_SPEED_1G:
1010                 speed_bit = HCLGE_SUPPORT_1G_BIT;
1011                 break;
1012         case HCLGE_MAC_SPEED_10G:
1013                 speed_bit = HCLGE_SUPPORT_10G_BIT;
1014                 break;
1015         case HCLGE_MAC_SPEED_25G:
1016                 speed_bit = HCLGE_SUPPORT_25G_BIT;
1017                 break;
1018         case HCLGE_MAC_SPEED_40G:
1019                 speed_bit = HCLGE_SUPPORT_40G_BIT;
1020                 break;
1021         case HCLGE_MAC_SPEED_50G:
1022                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1023                 break;
1024         case HCLGE_MAC_SPEED_100G:
1025                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1026                 break;
1027         case HCLGE_MAC_SPEED_200G:
1028                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1029                 break;
1030         default:
1031                 return -EINVAL;
1032         }
1033
1034         if (speed_bit & speed_ability)
1035                 return 0;
1036
1037         return -EINVAL;
1038 }
1039
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047                                  mac->supported);
1048         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059                                  mac->supported);
1060 }
1061
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066                                  mac->supported);
1067         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069                                  mac->supported);
1070         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072                                  mac->supported);
1073         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080                 linkmode_set_bit(
1081                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082                         mac->supported);
1083 }
1084
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089                                  mac->supported);
1090         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092                                  mac->supported);
1093         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104                                  mac->supported);
1105 }
1106
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111                                  mac->supported);
1112         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114                                  mac->supported);
1115         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117                                  mac->supported);
1118         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120                                  mac->supported);
1121         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123                                  mac->supported);
1124         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126                                  mac->supported);
1127         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129                                  mac->supported);
1130 }
1131
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136
1137         switch (mac->speed) {
1138         case HCLGE_MAC_SPEED_10G:
1139         case HCLGE_MAC_SPEED_40G:
1140                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141                                  mac->supported);
1142                 mac->fec_ability =
1143                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144                 break;
1145         case HCLGE_MAC_SPEED_25G:
1146         case HCLGE_MAC_SPEED_50G:
1147                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148                                  mac->supported);
1149                 mac->fec_ability =
1150                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151                         BIT(HNAE3_FEC_AUTO);
1152                 break;
1153         case HCLGE_MAC_SPEED_100G:
1154         case HCLGE_MAC_SPEED_200G:
1155                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157                 break;
1158         default:
1159                 mac->fec_ability = 0;
1160                 break;
1161         }
1162 }
1163
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165                                         u16 speed_ability)
1166 {
1167         struct hclge_mac *mac = &hdev->hw.mac;
1168
1169         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171                                  mac->supported);
1172
1173         hclge_convert_setting_sr(mac, speed_ability);
1174         hclge_convert_setting_lr(mac, speed_ability);
1175         hclge_convert_setting_cr(mac, speed_ability);
1176         if (hnae3_dev_fec_supported(hdev))
1177                 hclge_convert_setting_fec(mac);
1178
1179         if (hnae3_dev_pause_supported(hdev))
1180                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187                                             u16 speed_ability)
1188 {
1189         struct hclge_mac *mac = &hdev->hw.mac;
1190
1191         hclge_convert_setting_kr(mac, speed_ability);
1192         if (hnae3_dev_fec_supported(hdev))
1193                 hclge_convert_setting_fec(mac);
1194
1195         if (hnae3_dev_pause_supported(hdev))
1196                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197
1198         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203                                          u16 speed_ability)
1204 {
1205         unsigned long *supported = hdev->hw.mac.supported;
1206
1207         /* default to support all speeds for a GE port */
1208         if (!speed_ability)
1209                 speed_ability = HCLGE_SUPPORT_GE;
1210
1211         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213                                  supported);
1214
1215         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217                                  supported);
1218                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219                                  supported);
1220         }
1221
1222         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225         }
1226
1227         if (hnae3_dev_pause_supported(hdev)) {
1228                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230         }
1231
1232         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238         u8 media_type = hdev->hw.mac.media_type;
1239
1240         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243                 hclge_parse_copper_link_mode(hdev, speed_ability);
1244         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251                 return HCLGE_MAC_SPEED_200G;
1252
1253         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254                 return HCLGE_MAC_SPEED_100G;
1255
1256         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257                 return HCLGE_MAC_SPEED_50G;
1258
1259         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260                 return HCLGE_MAC_SPEED_40G;
1261
1262         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263                 return HCLGE_MAC_SPEED_25G;
1264
1265         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266                 return HCLGE_MAC_SPEED_10G;
1267
1268         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269                 return HCLGE_MAC_SPEED_1G;
1270
1271         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272                 return HCLGE_MAC_SPEED_100M;
1273
1274         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275                 return HCLGE_MAC_SPEED_10M;
1276
1277         return HCLGE_MAC_SPEED_1G;
1278 }
1279
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT                 8
1283
1284         struct hclge_cfg_param_cmd *req;
1285         u64 mac_addr_tmp_high;
1286         u16 speed_ability_ext;
1287         u64 mac_addr_tmp;
1288         unsigned int i;
1289
1290         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291
1292         /* get the configuration */
1293         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296                                             HCLGE_CFG_TQP_DESC_N_M,
1297                                             HCLGE_CFG_TQP_DESC_N_S);
1298
1299         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300                                         HCLGE_CFG_PHY_ADDR_M,
1301                                         HCLGE_CFG_PHY_ADDR_S);
1302         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303                                           HCLGE_CFG_MEDIA_TP_M,
1304                                           HCLGE_CFG_MEDIA_TP_S);
1305         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306                                           HCLGE_CFG_RX_BUF_LEN_M,
1307                                           HCLGE_CFG_RX_BUF_LEN_S);
1308         /* get mac_address */
1309         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311                                             HCLGE_CFG_MAC_ADDR_H_M,
1312                                             HCLGE_CFG_MAC_ADDR_H_S);
1313
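        /* The 48-bit MAC address is split across two config words: the low
         * 32 bits come from param[2] and the remaining high bits from
         * param[3]. The split "(<< 31) << 1" below is equivalent to a shift
         * by 32 and places the high bits above the low 32 bits already held
         * in mac_addr_tmp.
         */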
1314         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315
1316         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317                                              HCLGE_CFG_DEFAULT_SPEED_M,
1318                                              HCLGE_CFG_DEFAULT_SPEED_S);
1319         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320                                                HCLGE_CFG_RSS_SIZE_M,
1321                                                HCLGE_CFG_RSS_SIZE_S);
1322
1323         for (i = 0; i < ETH_ALEN; i++)
1324                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325
1326         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328
1329         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330                                              HCLGE_CFG_SPEED_ABILITY_M,
1331                                              HCLGE_CFG_SPEED_ABILITY_S);
1332         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336
1337         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1339                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1340         if (!cfg->umv_space)
1341                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1342
1343         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1344                                                HCLGE_CFG_PF_RSS_SIZE_M,
1345                                                HCLGE_CFG_PF_RSS_SIZE_S);
1346
1347         /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
1348          * exponent rather than the value itself, which is more flexible for
1349          * future changes and expansions. For example, a field value of 7
1350          * means a max rss size of 1 << 7 = 128.
1351          * A value of 0 means the field is unused; in that case the PF falls
1352          * back to the VF max rss size read via HCLGE_CFG_RSS_SIZE_S.
1353          */
1354         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1355                                1U << cfg->pf_rss_size_max :
1356                                cfg->vf_rss_size_max;
1357 }
1358
1359 /* hclge_get_cfg: query the static parameters from flash
1360  * @hdev: pointer to struct hclge_dev
1361  * @hcfg: the config structure to be filled
1362  */
1363 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1364 {
1365         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1366         struct hclge_cfg_param_cmd *req;
1367         unsigned int i;
1368         int ret;
1369
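        /* Each descriptor reads HCLGE_CFG_RD_LEN_BYTES of the parameter
         * area; the byte offset and the read length (in 4-byte units) are
         * packed together into req->offset for every descriptor.
         */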
1370         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1371                 u32 offset = 0;
1372
1373                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1374                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1375                                            true);
1376                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1377                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1378                 /* Len is in units of 4 bytes when sent to hardware */
1379                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1380                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1381                 req->offset = cpu_to_le32(offset);
1382         }
1383
1384         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1385         if (ret) {
1386                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1387                 return ret;
1388         }
1389
1390         hclge_parse_cfg(hcfg, desc);
1391
1392         return 0;
1393 }
1394
1395 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1396 {
1397 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1398
1399         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1400
1401         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1402         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1403         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1404         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1405         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1406         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1407         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1408 }
1409
1410 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1411                                   struct hclge_desc *desc)
1412 {
1413         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414         struct hclge_dev_specs_0_cmd *req0;
1415         struct hclge_dev_specs_1_cmd *req1;
1416
1417         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1418         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1419
1420         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1421         ae_dev->dev_specs.rss_ind_tbl_size =
1422                 le16_to_cpu(req0->rss_ind_tbl_size);
1423         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1424         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1425         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1426         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1427         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1428         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1429 }
1430
1431 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1432 {
1433         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1434
1435         if (!dev_specs->max_non_tso_bd_num)
1436                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1437         if (!dev_specs->rss_ind_tbl_size)
1438                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1439         if (!dev_specs->rss_key_size)
1440                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1441         if (!dev_specs->max_tm_rate)
1442                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1443         if (!dev_specs->max_qset_num)
1444                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1445         if (!dev_specs->max_int_gl)
1446                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1447         if (!dev_specs->max_frm_size)
1448                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1449 }
1450
1451 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1452 {
1453         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1454         int ret;
1455         int i;
1456
1457         /* set default specifications as devices lower than version V3 do not
1458          * support querying specifications from firmware.
1459          */
1460         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1461                 hclge_set_default_dev_specs(hdev);
1462                 return 0;
1463         }
1464
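        /* Chain the buffer descriptors: every BD except the last one sets
         * the NEXT flag, so the HCLGE_QUERY_DEV_SPECS_BD_NUM BDs are handled
         * as one query.
         */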
1465         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1466                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1467                                            true);
1468                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1469         }
1470         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1471
1472         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1473         if (ret)
1474                 return ret;
1475
1476         hclge_parse_dev_specs(hdev, desc);
1477         hclge_check_dev_specs(hdev);
1478
1479         return 0;
1480 }
1481
1482 static int hclge_get_cap(struct hclge_dev *hdev)
1483 {
1484         int ret;
1485
1486         ret = hclge_query_function_status(hdev);
1487         if (ret) {
1488                 dev_err(&hdev->pdev->dev,
1489                         "query function status error %d.\n", ret);
1490                 return ret;
1491         }
1492
1493         /* get pf resource */
1494         return hclge_query_pf_resource(hdev);
1495 }
1496
1497 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1498 {
1499 #define HCLGE_MIN_TX_DESC       64
1500 #define HCLGE_MIN_RX_DESC       64
1501
1502         if (!is_kdump_kernel())
1503                 return;
1504
1505         dev_info(&hdev->pdev->dev,
1506                  "Running kdump kernel. Using minimal resources\n");
1507
1508         /* the minimal number of queue pairs equals the number of vports */
1509         hdev->num_tqps = hdev->num_req_vfs + 1;
1510         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1511         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1512 }
1513
1514 static int hclge_configure(struct hclge_dev *hdev)
1515 {
1516         struct hclge_cfg cfg;
1517         unsigned int i;
1518         int ret;
1519
1520         ret = hclge_get_cfg(hdev, &cfg);
1521         if (ret)
1522                 return ret;
1523
1524         hdev->base_tqp_pid = 0;
1525         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1526         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1527         hdev->rx_buf_len = cfg.rx_buf_len;
1528         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1529         hdev->hw.mac.media_type = cfg.media_type;
1530         hdev->hw.mac.phy_addr = cfg.phy_addr;
1531         hdev->num_tx_desc = cfg.tqp_desc_num;
1532         hdev->num_rx_desc = cfg.tqp_desc_num;
1533         hdev->tm_info.num_pg = 1;
1534         hdev->tc_max = cfg.tc_num;
1535         hdev->tm_info.hw_pfc_map = 0;
1536         hdev->wanted_umv_size = cfg.umv_space;
1537
1538         if (hnae3_dev_fd_supported(hdev)) {
1539                 hdev->fd_en = true;
1540                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1541         }
1542
1543         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1544         if (ret) {
1545                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1546                         cfg.default_speed, ret);
1547                 return ret;
1548         }
1549
1550         hclge_parse_link_mode(hdev, cfg.speed_ability);
1551
1552         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1553
1554         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1555             (hdev->tc_max < 1)) {
1556                 dev_warn(&hdev->pdev->dev, "invalid TC num %u, set to 1.\n",
1557                          hdev->tc_max);
1558                 hdev->tc_max = 1;
1559         }
1560
1561         /* Dev does not support DCB */
1562         if (!hnae3_dev_dcb_supported(hdev)) {
1563                 hdev->tc_max = 1;
1564                 hdev->pfc_max = 0;
1565         } else {
1566                 hdev->pfc_max = hdev->tc_max;
1567         }
1568
1569         hdev->tm_info.num_tc = 1;
1570
1571         /* Non-contiguous TCs are currently not supported */
1572         for (i = 0; i < hdev->tm_info.num_tc; i++)
1573                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1574
1575         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1576
1577         hclge_init_kdump_kernel_config(hdev);
1578
1579         /* Set the initial affinity based on the PCI function number */
1580         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1581         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1582         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1583                         &hdev->affinity_mask);
1584
1585         return ret;
1586 }
1587
1588 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1589                             u16 tso_mss_max)
1590 {
1591         struct hclge_cfg_tso_status_cmd *req;
1592         struct hclge_desc desc;
1593
1594         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1595
1596         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1597         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1598         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1599
1600         return hclge_cmd_send(&hdev->hw, &desc, 1);
1601 }
1602
1603 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1604 {
1605         struct hclge_cfg_gro_status_cmd *req;
1606         struct hclge_desc desc;
1607         int ret;
1608
1609         if (!hnae3_dev_gro_supported(hdev))
1610                 return 0;
1611
1612         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1613         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1614
1615         req->gro_en = en ? 1 : 0;
1616
1617         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1618         if (ret)
1619                 dev_err(&hdev->pdev->dev,
1620                         "GRO hardware config cmd failed, ret = %d\n", ret);
1621
1622         return ret;
1623 }
1624
1625 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1626 {
1627         struct hclge_tqp *tqp;
1628         int i;
1629
1630         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1631                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1632         if (!hdev->htqp)
1633                 return -ENOMEM;
1634
1635         tqp = hdev->htqp;
1636
1637         for (i = 0; i < hdev->num_tqps; i++) {
1638                 tqp->dev = &hdev->pdev->dev;
1639                 tqp->index = i;
1640
1641                 tqp->q.ae_algo = &ae_algo;
1642                 tqp->q.buf_size = hdev->rx_buf_len;
1643                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1644                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1645
1646                 /* need an extended offset to configure queues >=
1647                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1648                  */
1649                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1650                         tqp->q.io_base = hdev->hw.io_base +
1651                                          HCLGE_TQP_REG_OFFSET +
1652                                          i * HCLGE_TQP_REG_SIZE;
1653                 else
1654                         tqp->q.io_base = hdev->hw.io_base +
1655                                          HCLGE_TQP_REG_OFFSET +
1656                                          HCLGE_TQP_EXT_REG_OFFSET +
1657                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1658                                          HCLGE_TQP_REG_SIZE;
1659
1660                 tqp++;
1661         }
1662
1663         return 0;
1664 }
1665
1666 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1667                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1668 {
1669         struct hclge_tqp_map_cmd *req;
1670         struct hclge_desc desc;
1671         int ret;
1672
1673         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1674
1675         req = (struct hclge_tqp_map_cmd *)desc.data;
1676         req->tqp_id = cpu_to_le16(tqp_pid);
1677         req->tqp_vf = func_id;
1678         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1679         if (!is_pf)
1680                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1681         req->tqp_vid = cpu_to_le16(tqp_vid);
1682
1683         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1684         if (ret)
1685                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1686
1687         return ret;
1688 }
1689
1690 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1691 {
1692         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1693         struct hclge_dev *hdev = vport->back;
1694         int i, alloced;
1695
1696         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1697              alloced < num_tqps; i++) {
1698                 if (!hdev->htqp[i].alloced) {
1699                         hdev->htqp[i].q.handle = &vport->nic;
1700                         hdev->htqp[i].q.tqp_index = alloced;
1701                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1702                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1703                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1704                         hdev->htqp[i].alloced = true;
1705                         alloced++;
1706                 }
1707         }
1708         vport->alloc_tqps = alloced;
1709         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1710                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1711
1712         /* ensure a one-to-one mapping between irq vector and queue by default */
1713         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1714                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1715
1716         return 0;
1717 }
1718
1719 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1720                             u16 num_tx_desc, u16 num_rx_desc)
1721
1722 {
1723         struct hnae3_handle *nic = &vport->nic;
1724         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1725         struct hclge_dev *hdev = vport->back;
1726         int ret;
1727
1728         kinfo->num_tx_desc = num_tx_desc;
1729         kinfo->num_rx_desc = num_rx_desc;
1730
1731         kinfo->rx_buf_len = hdev->rx_buf_len;
1732
1733         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1734                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1735         if (!kinfo->tqp)
1736                 return -ENOMEM;
1737
1738         ret = hclge_assign_tqp(vport, num_tqps);
1739         if (ret)
1740                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1741
1742         return ret;
1743 }
1744
1745 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1746                                   struct hclge_vport *vport)
1747 {
1748         struct hnae3_handle *nic = &vport->nic;
1749         struct hnae3_knic_private_info *kinfo;
1750         u16 i;
1751
1752         kinfo = &nic->kinfo;
1753         for (i = 0; i < vport->alloc_tqps; i++) {
1754                 struct hclge_tqp *q =
1755                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1756                 bool is_pf;
1757                 int ret;
1758
1759                 is_pf = !(vport->vport_id);
1760                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1761                                              i, is_pf);
1762                 if (ret)
1763                         return ret;
1764         }
1765
1766         return 0;
1767 }
1768
1769 static int hclge_map_tqp(struct hclge_dev *hdev)
1770 {
1771         struct hclge_vport *vport = hdev->vport;
1772         u16 i, num_vport;
1773
1774         num_vport = hdev->num_req_vfs + 1;
1775         for (i = 0; i < num_vport; i++) {
1776                 int ret;
1777
1778                 ret = hclge_map_tqp_to_vport(hdev, vport);
1779                 if (ret)
1780                         return ret;
1781
1782                 vport++;
1783         }
1784
1785         return 0;
1786 }
1787
1788 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1789 {
1790         struct hnae3_handle *nic = &vport->nic;
1791         struct hclge_dev *hdev = vport->back;
1792         int ret;
1793
1794         nic->pdev = hdev->pdev;
1795         nic->ae_algo = &ae_algo;
1796         nic->numa_node_mask = hdev->numa_node_mask;
1797
1798         ret = hclge_knic_setup(vport, num_tqps,
1799                                hdev->num_tx_desc, hdev->num_rx_desc);
1800         if (ret)
1801                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1802
1803         return ret;
1804 }
1805
1806 static int hclge_alloc_vport(struct hclge_dev *hdev)
1807 {
1808         struct pci_dev *pdev = hdev->pdev;
1809         struct hclge_vport *vport;
1810         u32 tqp_main_vport;
1811         u32 tqp_per_vport;
1812         int num_vport, i;
1813         int ret;
1814
1815         /* We need to alloc a vport for the main NIC of the PF */
1816         num_vport = hdev->num_req_vfs + 1;
1817
1818         if (hdev->num_tqps < num_vport) {
1819                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
1820                         hdev->num_tqps, num_vport);
1821                 return -EINVAL;
1822         }
1823
1824         /* Alloc the same number of TQPs for every vport */
1825         tqp_per_vport = hdev->num_tqps / num_vport;
1826         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1827
1828         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1829                              GFP_KERNEL);
1830         if (!vport)
1831                 return -ENOMEM;
1832
1833         hdev->vport = vport;
1834         hdev->num_alloc_vport = num_vport;
1835
1836         if (IS_ENABLED(CONFIG_PCI_IOV))
1837                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1838
1839         for (i = 0; i < num_vport; i++) {
1840                 vport->back = hdev;
1841                 vport->vport_id = i;
1842                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1843                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1844                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1845                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1846                 INIT_LIST_HEAD(&vport->vlan_list);
1847                 INIT_LIST_HEAD(&vport->uc_mac_list);
1848                 INIT_LIST_HEAD(&vport->mc_mac_list);
1849                 spin_lock_init(&vport->mac_list_lock);
1850
1851                 if (i == 0)
1852                         ret = hclge_vport_setup(vport, tqp_main_vport);
1853                 else
1854                         ret = hclge_vport_setup(vport, tqp_per_vport);
1855                 if (ret) {
1856                         dev_err(&pdev->dev,
1857                                 "vport setup failed for vport %d, %d\n",
1858                                 i, ret);
1859                         return ret;
1860                 }
1861
1862                 vport++;
1863         }
1864
1865         return 0;
1866 }
1867
1868 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1869                                     struct hclge_pkt_buf_alloc *buf_alloc)
1870 {
1871 /* TX buffer size is in units of 128 bytes */
1872 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1873 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1874         struct hclge_tx_buff_alloc_cmd *req;
1875         struct hclge_desc desc;
1876         int ret;
1877         u8 i;
1878
1879         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1880
1881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1882         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1884
1885                 req->tx_pkt_buff[i] =
1886                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1887                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1888         }
1889
1890         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1891         if (ret)
1892                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1893                         ret);
1894
1895         return ret;
1896 }
1897
1898 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1899                                  struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
1901         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1902
1903         if (ret)
1904                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1905
1906         return ret;
1907 }
1908
1909 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1910 {
1911         unsigned int i;
1912         u32 cnt = 0;
1913
1914         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1915                 if (hdev->hw_tc_map & BIT(i))
1916                         cnt++;
1917         return cnt;
1918 }
1919
1920 /* Get the number of PFC-enabled TCs that have a private buffer */
1921 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1922                                   struct hclge_pkt_buf_alloc *buf_alloc)
1923 {
1924         struct hclge_priv_buf *priv;
1925         unsigned int i;
1926         int cnt = 0;
1927
1928         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1929                 priv = &buf_alloc->priv_buf[i];
1930                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1931                     priv->enable)
1932                         cnt++;
1933         }
1934
1935         return cnt;
1936 }
1937
1938 /* Get the number of PFC-disabled TCs that have a private buffer */
1939 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1940                                      struct hclge_pkt_buf_alloc *buf_alloc)
1941 {
1942         struct hclge_priv_buf *priv;
1943         unsigned int i;
1944         int cnt = 0;
1945
1946         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1947                 priv = &buf_alloc->priv_buf[i];
1948                 if (hdev->hw_tc_map & BIT(i) &&
1949                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1950                     priv->enable)
1951                         cnt++;
1952         }
1953
1954         return cnt;
1955 }
1956
1957 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959         struct hclge_priv_buf *priv;
1960         u32 rx_priv = 0;
1961         int i;
1962
1963         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1964                 priv = &buf_alloc->priv_buf[i];
1965                 if (priv->enable)
1966                         rx_priv += priv->buf_size;
1967         }
1968         return rx_priv;
1969 }
1970
1971 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1972 {
1973         u32 i, total_tx_size = 0;
1974
1975         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1976                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1977
1978         return total_tx_size;
1979 }
1980
1981 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1982                                 struct hclge_pkt_buf_alloc *buf_alloc,
1983                                 u32 rx_all)
1984 {
1985         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1986         u32 tc_num = hclge_get_tc_num(hdev);
1987         u32 shared_buf, aligned_mps;
1988         u32 rx_priv;
1989         int i;
1990
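        /* The rx packet buffer is split into per-TC private buffers plus one
         * shared buffer. Verify that, after the private buffers are carved
         * out of rx_all, at least shared_std bytes remain for the shared
         * buffer, then derive its high/low waterlines and the per-TC
         * thresholds from what is actually left.
         */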
1991         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1992
1993         if (hnae3_dev_dcb_supported(hdev))
1994                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1995                                         hdev->dv_buf_size;
1996         else
1997                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1998                                         + hdev->dv_buf_size;
1999
2000         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2001         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2002                              HCLGE_BUF_SIZE_UNIT);
2003
2004         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2005         if (rx_all < rx_priv + shared_std)
2006                 return false;
2007
2008         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2009         buf_alloc->s_buf.buf_size = shared_buf;
2010         if (hnae3_dev_dcb_supported(hdev)) {
2011                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2012                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2013                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2014                                   HCLGE_BUF_SIZE_UNIT);
2015         } else {
2016                 buf_alloc->s_buf.self.high = aligned_mps +
2017                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
2018                 buf_alloc->s_buf.self.low = aligned_mps;
2019         }
2020
2021         if (hnae3_dev_dcb_supported(hdev)) {
2022                 hi_thrd = shared_buf - hdev->dv_buf_size;
2023
2024                 if (tc_num <= NEED_RESERVE_TC_NUM)
2025                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2026                                         / BUF_MAX_PERCENT;
2027
2028                 if (tc_num)
2029                         hi_thrd = hi_thrd / tc_num;
2030
2031                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2032                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2033                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2034         } else {
2035                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2036                 lo_thrd = aligned_mps;
2037         }
2038
2039         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2041                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2042         }
2043
2044         return true;
2045 }
2046
2047 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2048                                 struct hclge_pkt_buf_alloc *buf_alloc)
2049 {
2050         u32 i, total_size;
2051
2052         total_size = hdev->pkt_buf_size;
2053
2054         /* alloc a tx buffer for each enabled TC */
2055         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2056                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2057
2058                 if (hdev->hw_tc_map & BIT(i)) {
2059                         if (total_size < hdev->tx_buf_size)
2060                                 return -ENOMEM;
2061
2062                         priv->tx_buf_size = hdev->tx_buf_size;
2063                 } else {
2064                         priv->tx_buf_size = 0;
2065                 }
2066
2067                 total_size -= priv->tx_buf_size;
2068         }
2069
2070         return 0;
2071 }
2072
2073 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2074                                   struct hclge_pkt_buf_alloc *buf_alloc)
2075 {
2076         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2077         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2078         unsigned int i;
2079
2080         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2081                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2082
2083                 priv->enable = 0;
2084                 priv->wl.low = 0;
2085                 priv->wl.high = 0;
2086                 priv->buf_size = 0;
2087
2088                 if (!(hdev->hw_tc_map & BIT(i)))
2089                         continue;
2090
2091                 priv->enable = 1;
2092
2093                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2094                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2095                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2096                                                 HCLGE_BUF_SIZE_UNIT);
2097                 } else {
2098                         priv->wl.low = 0;
2099                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2100                                         aligned_mps;
2101                 }
2102
2103                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2104         }
2105
2106         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108
2109 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2110                                           struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2114         int i;
2115
2116         /* clear the private buffers starting from the last TC */
2117         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119                 unsigned int mask = BIT((unsigned int)i);
2120
2121                 if (hdev->hw_tc_map & mask &&
2122                     !(hdev->tm_info.hw_pfc_map & mask)) {
2123                         /* Clear the private buffer of this non-PFC TC */
2124                         priv->wl.low = 0;
2125                         priv->wl.high = 0;
2126                         priv->buf_size = 0;
2127                         priv->enable = 0;
2128                         no_pfc_priv_num--;
2129                 }
2130
2131                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132                     no_pfc_priv_num == 0)
2133                         break;
2134         }
2135
2136         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138
2139 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2140                                         struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2144         int i;
2145
2146         /* clear the private buffers starting from the last TC */
2147         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149                 unsigned int mask = BIT((unsigned int)i);
2150
2151                 if (hdev->hw_tc_map & mask &&
2152                     hdev->tm_info.hw_pfc_map & mask) {
2153                         /* Reduce the number of PFC TCs with a private buffer */
2154                         priv->wl.low = 0;
2155                         priv->enable = 0;
2156                         priv->wl.high = 0;
2157                         priv->buf_size = 0;
2158                         pfc_priv_num--;
2159                 }
2160
2161                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2162                     pfc_priv_num == 0)
2163                         break;
2164         }
2165
2166         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2167 }
2168
2169 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2170                                        struct hclge_pkt_buf_alloc *buf_alloc)
2171 {
2172 #define COMPENSATE_BUFFER       0x3C00
2173 #define COMPENSATE_HALF_MPS_NUM 5
2174 #define PRIV_WL_GAP             0x1800
2175
2176         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2177         u32 tc_num = hclge_get_tc_num(hdev);
2178         u32 half_mps = hdev->mps >> 1;
2179         u32 min_rx_priv;
2180         unsigned int i;
2181
2182         if (tc_num)
2183                 rx_priv = rx_priv / tc_num;
2184
2185         if (tc_num <= NEED_RESERVE_TC_NUM)
2186                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2187
2188         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2189                         COMPENSATE_HALF_MPS_NUM * half_mps;
2190         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2191         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2192         if (rx_priv < min_rx_priv)
2193                 return false;
2194
2195         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2196                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2197
2198                 priv->enable = 0;
2199                 priv->wl.low = 0;
2200                 priv->wl.high = 0;
2201                 priv->buf_size = 0;
2202
2203                 if (!(hdev->hw_tc_map & BIT(i)))
2204                         continue;
2205
2206                 priv->enable = 1;
2207                 priv->buf_size = rx_priv;
2208                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2209                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2210         }
2211
2212         buf_alloc->s_buf.buf_size = 0;
2213
2214         return true;
2215 }
2216
2217 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2218  * @hdev: pointer to struct hclge_dev
2219  * @buf_alloc: pointer to buffer calculation data
2220  * @return: 0 on success, a negative error code on failure
2221  */
2222 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2223                                 struct hclge_pkt_buf_alloc *buf_alloc)
2224 {
2225         /* When DCB is not supported, rx private buffer is not allocated. */
2226         if (!hnae3_dev_dcb_supported(hdev)) {
2227                 u32 rx_all = hdev->pkt_buf_size;
2228
2229                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2230                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2231                         return -ENOMEM;
2232
2233                 return 0;
2234         }
2235
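        /* Try the allocation strategies in order and stop at the first one
         * that fits: private buffers only (no shared buffer), private plus
         * shared buffer with large waterlines, the same with reduced
         * waterlines, then dropping the private buffers of non-PFC TCs and
         * finally of PFC TCs.
         */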
2236         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2237                 return 0;
2238
2239         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2240                 return 0;
2241
2242         /* try to decrease the buffer size */
2243         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2244                 return 0;
2245
2246         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2247                 return 0;
2248
2249         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2250                 return 0;
2251
2252         return -ENOMEM;
2253 }
2254
2255 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2256                                    struct hclge_pkt_buf_alloc *buf_alloc)
2257 {
2258         struct hclge_rx_priv_buff_cmd *req;
2259         struct hclge_desc desc;
2260         int ret;
2261         int i;
2262
2263         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2264         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2265
2266         /* Alloc private buffers for the TCs */
2267         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2268                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2269
2270                 req->buf_num[i] =
2271                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2272                 req->buf_num[i] |=
2273                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2274         }
2275
2276         req->shared_buf =
2277                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2278                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2279
2280         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2281         if (ret)
2282                 dev_err(&hdev->pdev->dev,
2283                         "rx private buffer alloc cmd failed %d\n", ret);
2284
2285         return ret;
2286 }
2287
2288 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2289                                    struct hclge_pkt_buf_alloc *buf_alloc)
2290 {
2291         struct hclge_rx_priv_wl_buf *req;
2292         struct hclge_priv_buf *priv;
2293         struct hclge_desc desc[2];
2294         int i, j;
2295         int ret;
2296
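        /* The per-TC waterlines are carried in two chained descriptors,
         * HCLGE_TC_NUM_ONE_DESC TCs per descriptor.
         */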
2297         for (i = 0; i < 2; i++) {
2298                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2299                                            false);
2300                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2301
2302                 /* The first descriptor sets the NEXT bit to 1 */
2303                 if (i == 0)
2304                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2305                 else
2306                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2307
2308                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2309                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2310
2311                         priv = &buf_alloc->priv_buf[idx];
2312                         req->tc_wl[j].high =
2313                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2314                         req->tc_wl[j].high |=
2315                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2316                         req->tc_wl[j].low =
2317                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2318                         req->tc_wl[j].low |=
2319                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2320                 }
2321         }
2322
2323         /* Send 2 descriptors at one time */
2324         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2325         if (ret)
2326                 dev_err(&hdev->pdev->dev,
2327                         "rx private waterline config cmd failed %d\n",
2328                         ret);
2329         return ret;
2330 }
2331
2332 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2333                                     struct hclge_pkt_buf_alloc *buf_alloc)
2334 {
2335         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2336         struct hclge_rx_com_thrd *req;
2337         struct hclge_desc desc[2];
2338         struct hclge_tc_thrd *tc;
2339         int i, j;
2340         int ret;
2341
2342         for (i = 0; i < 2; i++) {
2343                 hclge_cmd_setup_basic_desc(&desc[i],
2344                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2345                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2346
2347                 /* The first descriptor sets the NEXT bit to 1 */
2348                 if (i == 0)
2349                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2350                 else
2351                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2352
2353                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2354                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2355
2356                         req->com_thrd[j].high =
2357                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2358                         req->com_thrd[j].high |=
2359                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2360                         req->com_thrd[j].low =
2361                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2362                         req->com_thrd[j].low |=
2363                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2364                 }
2365         }
2366
2367         /* Send 2 descriptors at one time */
2368         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2369         if (ret)
2370                 dev_err(&hdev->pdev->dev,
2371                         "common threshold config cmd failed %d\n", ret);
2372         return ret;
2373 }
2374
2375 static int hclge_common_wl_config(struct hclge_dev *hdev,
2376                                   struct hclge_pkt_buf_alloc *buf_alloc)
2377 {
2378         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2379         struct hclge_rx_com_wl *req;
2380         struct hclge_desc desc;
2381         int ret;
2382
2383         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2384
2385         req = (struct hclge_rx_com_wl *)desc.data;
2386         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2387         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388
2389         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2390         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2391
2392         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2393         if (ret)
2394                 dev_err(&hdev->pdev->dev,
2395                         "common waterline config cmd failed %d\n", ret);
2396
2397         return ret;
2398 }
2399
2400 int hclge_buffer_alloc(struct hclge_dev *hdev)
2401 {
2402         struct hclge_pkt_buf_alloc *pkt_buf;
2403         int ret;
2404
2405         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2406         if (!pkt_buf)
2407                 return -ENOMEM;
2408
2409         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2410         if (ret) {
2411                 dev_err(&hdev->pdev->dev,
2412                         "could not calc tx buffer size for all TCs %d\n", ret);
2413                 goto out;
2414         }
2415
2416         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2417         if (ret) {
2418                 dev_err(&hdev->pdev->dev,
2419                         "could not alloc tx buffers %d\n", ret);
2420                 goto out;
2421         }
2422
2423         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2424         if (ret) {
2425                 dev_err(&hdev->pdev->dev,
2426                         "could not calc rx priv buffer size for all TCs %d\n",
2427                         ret);
2428                 goto out;
2429         }
2430
2431         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2432         if (ret) {
2433                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2434                         ret);
2435                 goto out;
2436         }
2437
2438         if (hnae3_dev_dcb_supported(hdev)) {
2439                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2440                 if (ret) {
2441                         dev_err(&hdev->pdev->dev,
2442                                 "could not configure rx private waterline %d\n",
2443                                 ret);
2444                         goto out;
2445                 }
2446
2447                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2448                 if (ret) {
2449                         dev_err(&hdev->pdev->dev,
2450                                 "could not configure common threshold %d\n",
2451                                 ret);
2452                         goto out;
2453                 }
2454         }
2455
2456         ret = hclge_common_wl_config(hdev, pkt_buf);
2457         if (ret)
2458                 dev_err(&hdev->pdev->dev,
2459                         "could not configure common waterline %d\n", ret);
2460
2461 out:
2462         kfree(pkt_buf);
2463         return ret;
2464 }
2465
2466 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2467 {
2468         struct hnae3_handle *roce = &vport->roce;
2469         struct hnae3_handle *nic = &vport->nic;
2470         struct hclge_dev *hdev = vport->back;
2471
2472         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2473
2474         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2475                 return -EINVAL;
2476
2477         roce->rinfo.base_vector = hdev->roce_base_vector;
2478
2479         roce->rinfo.netdev = nic->kinfo.netdev;
2480         roce->rinfo.roce_io_base = hdev->hw.io_base;
2481         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2482
2483         roce->pdev = nic->pdev;
2484         roce->ae_algo = nic->ae_algo;
2485         roce->numa_node_mask = nic->numa_node_mask;
2486
2487         return 0;
2488 }
2489
2490 static int hclge_init_msi(struct hclge_dev *hdev)
2491 {
2492         struct pci_dev *pdev = hdev->pdev;
2493         int vectors;
2494         int i;
2495
2496         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2497                                         hdev->num_msi,
2498                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2499         if (vectors < 0) {
2500                 dev_err(&pdev->dev,
2501                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2502                         vectors);
2503                 return vectors;
2504         }
2505         if (vectors < hdev->num_msi)
2506                 dev_warn(&hdev->pdev->dev,
2507                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2508                          hdev->num_msi, vectors);
2509
2510         hdev->num_msi = vectors;
2511         hdev->num_msi_left = vectors;
2512
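        /* The RoCE vectors follow the num_nic_msi vectors used by the NIC,
         * so the RoCE base vector is offset by num_nic_msi from the MSI base.
         */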
2513         hdev->base_msi_vector = pdev->irq;
2514         hdev->roce_base_vector = hdev->base_msi_vector +
2515                                 hdev->num_nic_msi;
2516
2517         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2518                                            sizeof(u16), GFP_KERNEL);
2519         if (!hdev->vector_status) {
2520                 pci_free_irq_vectors(pdev);
2521                 return -ENOMEM;
2522         }
2523
2524         for (i = 0; i < hdev->num_msi; i++)
2525                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2526
2527         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2528                                         sizeof(int), GFP_KERNEL);
2529         if (!hdev->vector_irq) {
2530                 pci_free_irq_vectors(pdev);
2531                 return -ENOMEM;
2532         }
2533
2534         return 0;
2535 }
2536
2537 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2538 {
2539         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2540                 duplex = HCLGE_MAC_FULL;
2541
2542         return duplex;
2543 }
2544
2545 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2546                                       u8 duplex)
2547 {
2548         struct hclge_config_mac_speed_dup_cmd *req;
2549         struct hclge_desc desc;
2550         int ret;
2551
2552         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2553
2554         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2555
2556         if (duplex)
2557                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2558
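        /* Translate the MAC speed into the encoding used by the firmware
         * speed field: 1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6,
         * 100M=7, 200G=8.
         */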
2559         switch (speed) {
2560         case HCLGE_MAC_SPEED_10M:
2561                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562                                 HCLGE_CFG_SPEED_S, 6);
2563                 break;
2564         case HCLGE_MAC_SPEED_100M:
2565                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566                                 HCLGE_CFG_SPEED_S, 7);
2567                 break;
2568         case HCLGE_MAC_SPEED_1G:
2569                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570                                 HCLGE_CFG_SPEED_S, 0);
2571                 break;
2572         case HCLGE_MAC_SPEED_10G:
2573                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574                                 HCLGE_CFG_SPEED_S, 1);
2575                 break;
2576         case HCLGE_MAC_SPEED_25G:
2577                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578                                 HCLGE_CFG_SPEED_S, 2);
2579                 break;
2580         case HCLGE_MAC_SPEED_40G:
2581                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582                                 HCLGE_CFG_SPEED_S, 3);
2583                 break;
2584         case HCLGE_MAC_SPEED_50G:
2585                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586                                 HCLGE_CFG_SPEED_S, 4);
2587                 break;
2588         case HCLGE_MAC_SPEED_100G:
2589                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590                                 HCLGE_CFG_SPEED_S, 5);
2591                 break;
2592         case HCLGE_MAC_SPEED_200G:
2593                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594                                 HCLGE_CFG_SPEED_S, 8);
2595                 break;
2596         default:
2597                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2598                 return -EINVAL;
2599         }
2600
2601         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2602                       1);
2603
2604         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2605         if (ret) {
2606                 dev_err(&hdev->pdev->dev,
2607                         "mac speed/duplex config cmd failed %d.\n", ret);
2608                 return ret;
2609         }
2610
2611         return 0;
2612 }
2613
2614 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2615 {
2616         struct hclge_mac *mac = &hdev->hw.mac;
2617         int ret;
2618
2619         duplex = hclge_check_speed_dup(duplex, speed);
2620         if (!mac->support_autoneg && mac->speed == speed &&
2621             mac->duplex == duplex)
2622                 return 0;
2623
2624         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2625         if (ret)
2626                 return ret;
2627
2628         hdev->hw.mac.speed = speed;
2629         hdev->hw.mac.duplex = duplex;
2630
2631         return 0;
2632 }
2633
2634 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2635                                      u8 duplex)
2636 {
2637         struct hclge_vport *vport = hclge_get_vport(handle);
2638         struct hclge_dev *hdev = vport->back;
2639
2640         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2641 }
2642
2643 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2644 {
2645         struct hclge_config_auto_neg_cmd *req;
2646         struct hclge_desc desc;
2647         u32 flag = 0;
2648         int ret;
2649
2650         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2651
2652         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2653         if (enable)
2654                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2655         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2656
2657         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2658         if (ret)
2659                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2660                         ret);
2661
2662         return ret;
2663 }
2664
2665 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2666 {
2667         struct hclge_vport *vport = hclge_get_vport(handle);
2668         struct hclge_dev *hdev = vport->back;
2669
2670         if (!hdev->hw.mac.support_autoneg) {
2671                 if (enable) {
2672                         dev_err(&hdev->pdev->dev,
2673                                 "autoneg is not supported by current port\n");
2674                         return -EOPNOTSUPP;
2675                 } else {
2676                         return 0;
2677                 }
2678         }
2679
2680         return hclge_set_autoneg_en(hdev, enable);
2681 }
2682
2683 static int hclge_get_autoneg(struct hnae3_handle *handle)
2684 {
2685         struct hclge_vport *vport = hclge_get_vport(handle);
2686         struct hclge_dev *hdev = vport->back;
2687         struct phy_device *phydev = hdev->hw.mac.phydev;
2688
2689         if (phydev)
2690                 return phydev->autoneg;
2691
2692         return hdev->hw.mac.autoneg;
2693 }
2694
2695 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2696 {
2697         struct hclge_vport *vport = hclge_get_vport(handle);
2698         struct hclge_dev *hdev = vport->back;
2699         int ret;
2700
2701         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2702
2703         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2704         if (ret)
2705                 return ret;
2706         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2707 }
2708
2709 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2710 {
2711         struct hclge_vport *vport = hclge_get_vport(handle);
2712         struct hclge_dev *hdev = vport->back;
2713
2714         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2715                 return hclge_set_autoneg_en(hdev, !halt);
2716
2717         return 0;
2718 }
2719
2720 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2721 {
2722         struct hclge_config_fec_cmd *req;
2723         struct hclge_desc desc;
2724         int ret;
2725
2726         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2727
2728         req = (struct hclge_config_fec_cmd *)desc.data;
2729         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2730                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2731         if (fec_mode & BIT(HNAE3_FEC_RS))
2732                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2733                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2734         if (fec_mode & BIT(HNAE3_FEC_BASER))
2735                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2736                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2737
2738         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2739         if (ret)
2740                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2741
2742         return ret;
2743 }
2744
2745 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2746 {
2747         struct hclge_vport *vport = hclge_get_vport(handle);
2748         struct hclge_dev *hdev = vport->back;
2749         struct hclge_mac *mac = &hdev->hw.mac;
2750         int ret;
2751
2752         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2753                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2754                 return -EINVAL;
2755         }
2756
2757         ret = hclge_set_fec_hw(hdev, fec_mode);
2758         if (ret)
2759                 return ret;
2760
2761         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2762         return 0;
2763 }
2764
2765 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2766                           u8 *fec_mode)
2767 {
2768         struct hclge_vport *vport = hclge_get_vport(handle);
2769         struct hclge_dev *hdev = vport->back;
2770         struct hclge_mac *mac = &hdev->hw.mac;
2771
2772         if (fec_ability)
2773                 *fec_ability = mac->fec_ability;
2774         if (fec_mode)
2775                 *fec_mode = mac->fec_mode;
2776 }
2777
2778 static int hclge_mac_init(struct hclge_dev *hdev)
2779 {
2780         struct hclge_mac *mac = &hdev->hw.mac;
2781         int ret;
2782
2783         hdev->support_sfp_query = true;
2784         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2785         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2786                                          hdev->hw.mac.duplex);
2787         if (ret)
2788                 return ret;
2789
2790         if (hdev->hw.mac.support_autoneg) {
2791                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2792                 if (ret)
2793                         return ret;
2794         }
2795
2796         mac->link = 0;
2797
2798         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2799                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2800                 if (ret)
2801                         return ret;
2802         }
2803
2804         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2805         if (ret) {
2806                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2807                 return ret;
2808         }
2809
2810         ret = hclge_set_default_loopback(hdev);
2811         if (ret)
2812                 return ret;
2813
2814         ret = hclge_buffer_alloc(hdev);
2815         if (ret)
2816                 dev_err(&hdev->pdev->dev,
2817                         "allocate buffer fail, ret=%d\n", ret);
2818
2819         return ret;
2820 }
2821
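/* The mailbox, reset and periodic service work all share one delayed
 * work item; the helpers below queue it on the first CPU of
 * hdev->affinity_mask, using state bits to mark which subtask was
 * requested and to avoid queueing while the device is being removed.
 */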
2822 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2823 {
2824         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2825             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2826                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2827                                     hclge_wq, &hdev->service_task, 0);
2828 }
2829
2830 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2831 {
2832         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2834                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835                                     hclge_wq, &hdev->service_task, 0);
2836 }
2837
2838 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2839 {
2840         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2842                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843                                     hclge_wq, &hdev->service_task,
2844                                     delay_time);
2845 }
2846
2847 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2848 {
2849         struct hclge_link_status_cmd *req;
2850         struct hclge_desc desc;
2851         int ret;
2852
2853         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2854         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2855         if (ret) {
2856                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2857                         ret);
2858                 return ret;
2859         }
2860
2861         req = (struct hclge_link_status_cmd *)desc.data;
2862         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2863                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2864
2865         return 0;
2866 }
2867
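/* Report link down without querying hardware when the device is marked
 * down or the attached PHY is not up and running; otherwise read the
 * MAC link status through the command queue.
 */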
2868 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2869 {
2870         struct phy_device *phydev = hdev->hw.mac.phydev;
2871
2872         *link_status = HCLGE_LINK_STATUS_DOWN;
2873
2874         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2875                 return 0;
2876
2877         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2878                 return 0;
2879
2880         return hclge_get_mac_link_status(hdev, link_status);
2881 }
2882
2883 static void hclge_update_link_status(struct hclge_dev *hdev)
2884 {
2885         struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2886         struct hnae3_handle *handle = &hdev->vport[0].nic;
2887         struct hnae3_client *rclient = hdev->roce_client;
2888         struct hnae3_client *client = hdev->nic_client;
2889         int state;
2890         int ret;
2891
2892         if (!client)
2893                 return;
2894
2895         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2896                 return;
2897
2898         ret = hclge_get_mac_phy_link(hdev, &state);
2899         if (ret) {
2900                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2901                 return;
2902         }
2903
2904         if (state != hdev->hw.mac.link) {
2905                 client->ops->link_status_change(handle, state);
2906                 hclge_config_mac_tnl_int(hdev, state);
2907                 if (rclient && rclient->ops->link_status_change)
2908                         rclient->ops->link_status_change(rhandle, state);
2909
2910                 hdev->hw.mac.link = state;
2911         }
2912
2913         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2914 }
2915
2916 static void hclge_update_port_capability(struct hclge_dev *hdev,
2917                                          struct hclge_mac *mac)
2918 {
2919         if (hnae3_dev_fec_supported(hdev))
2920                 /* update fec ability by speed */
2921                 hclge_convert_setting_fec(mac);
2922
2923         /* firmware cannot identify the backplane type; the media type
2924          * read from the configuration helps to handle it
2925          */
2926         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2927             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2928                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2929         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2930                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2931
2932         if (mac->support_autoneg) {
2933                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2934                 linkmode_copy(mac->advertising, mac->supported);
2935         } else {
2936                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2937                                    mac->supported);
2938                 linkmode_zero(mac->advertising);
2939         }
2940 }
2941
2942 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2943 {
2944         struct hclge_sfp_info_cmd *resp;
2945         struct hclge_desc desc;
2946         int ret;
2947
2948         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2949         resp = (struct hclge_sfp_info_cmd *)desc.data;
2950         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2951         if (ret == -EOPNOTSUPP) {
2952                 dev_warn(&hdev->pdev->dev,
2953                          "IMP does not support getting SFP speed %d\n", ret);
2954                 return ret;
2955         } else if (ret) {
2956                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2957                 return ret;
2958         }
2959
2960         *speed = le32_to_cpu(resp->speed);
2961
2962         return 0;
2963 }
2964
2965 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2966 {
2967         struct hclge_sfp_info_cmd *resp;
2968         struct hclge_desc desc;
2969         int ret;
2970
2971         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2972         resp = (struct hclge_sfp_info_cmd *)desc.data;
2973
2974         resp->query_type = QUERY_ACTIVE_SPEED;
2975
2976         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2977         if (ret == -EOPNOTSUPP) {
2978                 dev_warn(&hdev->pdev->dev,
2979                          "IMP does not support getting SFP info %d\n", ret);
2980                 return ret;
2981         } else if (ret) {
2982                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2983                 return ret;
2984         }
2985
2986         /* In some cases, the mac speed got from IMP may be 0; it should not
2987          * be written to mac->speed.
2988          */
2989         if (!le32_to_cpu(resp->speed))
2990                 return 0;
2991
2992         mac->speed = le32_to_cpu(resp->speed);
2993         /* if resp->speed_ability is 0, the firmware is an old version,
2994          * so do not update these params
2995          */
2996         if (resp->speed_ability) {
2997                 mac->module_type = le32_to_cpu(resp->module_type);
2998                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2999                 mac->autoneg = resp->autoneg;
3000                 mac->support_autoneg = resp->autoneg_ability;
3001                 mac->speed_type = QUERY_ACTIVE_SPEED;
3002                 if (!resp->active_fec)
3003                         mac->fec_mode = 0;
3004                 else
3005                         mac->fec_mode = BIT(resp->active_fec);
3006         } else {
3007                 mac->speed_type = QUERY_SFP_SPEED;
3008         }
3009
3010         return 0;
3011 }
3012
3013 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3014                                         struct ethtool_link_ksettings *cmd)
3015 {
3016         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3017         struct hclge_vport *vport = hclge_get_vport(handle);
3018         struct hclge_phy_link_ksetting_0_cmd *req0;
3019         struct hclge_phy_link_ksetting_1_cmd *req1;
3020         u32 supported, advertising, lp_advertising;
3021         struct hclge_dev *hdev = vport->back;
3022         int ret;
3023
3024         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3025                                    true);
3026         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3027         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3028                                    true);
3029
3030         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3031         if (ret) {
3032                 dev_err(&hdev->pdev->dev,
3033                         "failed to get phy link ksetting, ret = %d.\n", ret);
3034                 return ret;
3035         }
3036
3037         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3038         cmd->base.autoneg = req0->autoneg;
3039         cmd->base.speed = le32_to_cpu(req0->speed);
3040         cmd->base.duplex = req0->duplex;
3041         cmd->base.port = req0->port;
3042         cmd->base.transceiver = req0->transceiver;
3043         cmd->base.phy_address = req0->phy_address;
3044         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3045         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3046         supported = le32_to_cpu(req0->supported);
3047         advertising = le32_to_cpu(req0->advertising);
3048         lp_advertising = le32_to_cpu(req0->lp_advertising);
3049         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3050                                                 supported);
3051         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3052                                                 advertising);
3053         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3054                                                 lp_advertising);
3055
3056         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3057         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3058         cmd->base.master_slave_state = req1->master_slave_state;
3059
3060         return 0;
3061 }
3062
3063 static int
3064 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3065                              const struct ethtool_link_ksettings *cmd)
3066 {
3067         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3068         struct hclge_vport *vport = hclge_get_vport(handle);
3069         struct hclge_phy_link_ksetting_0_cmd *req0;
3070         struct hclge_phy_link_ksetting_1_cmd *req1;
3071         struct hclge_dev *hdev = vport->back;
3072         u32 advertising;
3073         int ret;
3074
3075         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3076             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3077              (cmd->base.duplex != DUPLEX_HALF &&
3078               cmd->base.duplex != DUPLEX_FULL)))
3079                 return -EINVAL;
3080
3081         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3082                                    false);
3083         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3084         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3085                                    false);
3086
3087         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3088         req0->autoneg = cmd->base.autoneg;
3089         req0->speed = cpu_to_le32(cmd->base.speed);
3090         req0->duplex = cmd->base.duplex;
3091         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3092                                                 cmd->link_modes.advertising);
3093         req0->advertising = cpu_to_le32(advertising);
3094         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3095
3096         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3097         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3098
3099         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3100         if (ret) {
3101                 dev_err(&hdev->pdev->dev,
3102                         "failed to set phy link ksettings, ret = %d.\n", ret);
3103                 return ret;
3104         }
3105
3106         hdev->hw.mac.autoneg = cmd->base.autoneg;
3107         hdev->hw.mac.speed = cmd->base.speed;
3108         hdev->hw.mac.duplex = cmd->base.duplex;
3109         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3110
3111         return 0;
3112 }
3113
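/* For devices whose PHY is managed by IMP firmware, refresh the cached
 * autoneg/speed/duplex from the PHY link ksettings; other devices are
 * left untouched.
 */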
3114 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3115 {
3116         struct ethtool_link_ksettings cmd;
3117         int ret;
3118
3119         if (!hnae3_dev_phy_imp_supported(hdev))
3120                 return 0;
3121
3122         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3123         if (ret)
3124                 return ret;
3125
3126         hdev->hw.mac.autoneg = cmd.base.autoneg;
3127         hdev->hw.mac.speed = cmd.base.speed;
3128         hdev->hw.mac.duplex = cmd.base.duplex;
3129
3130         return 0;
3131 }
3132
3133 static int hclge_tp_port_init(struct hclge_dev *hdev)
3134 {
3135         struct ethtool_link_ksettings cmd;
3136
3137         if (!hnae3_dev_phy_imp_supported(hdev))
3138                 return 0;
3139
3140         cmd.base.autoneg = hdev->hw.mac.autoneg;
3141         cmd.base.speed = hdev->hw.mac.speed;
3142         cmd.base.duplex = hdev->hw.mac.duplex;
3143         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3144
3145         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3146 }
3147
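/* Refresh the cached port information: copper ports are updated from the
 * PHY link ksettings, while other media query the SFP/qSFP info (or only
 * the SFP speed on pre-V2 devices) from firmware. If firmware reports
 * -EOPNOTSUPP, further SFP queries are disabled via support_sfp_query.
 */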
3148 static int hclge_update_port_info(struct hclge_dev *hdev)
3149 {
3150         struct hclge_mac *mac = &hdev->hw.mac;
3151         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3152         int ret;
3153
3154         /* get the port info from SFP cmd if not copper port */
3155         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3156                 return hclge_update_tp_port_info(hdev);
3157
3158         /* if IMP does not support getting SFP/qSFP info, return directly */
3159         if (!hdev->support_sfp_query)
3160                 return 0;
3161
3162         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3163                 ret = hclge_get_sfp_info(hdev, mac);
3164         else
3165                 ret = hclge_get_sfp_speed(hdev, &speed);
3166
3167         if (ret == -EOPNOTSUPP) {
3168                 hdev->support_sfp_query = false;
3169                 return ret;
3170         } else if (ret) {
3171                 return ret;
3172         }
3173
3174         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3175                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3176                         hclge_update_port_capability(hdev, mac);
3177                         return 0;
3178                 }
3179                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3180                                                HCLGE_MAC_FULL);
3181         } else {
3182                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3183                         return 0; /* do nothing if no SFP */
3184
3185                 /* must config full duplex for SFP */
3186                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3187         }
3188 }
3189
3190 static int hclge_get_status(struct hnae3_handle *handle)
3191 {
3192         struct hclge_vport *vport = hclge_get_vport(handle);
3193         struct hclge_dev *hdev = vport->back;
3194
3195         hclge_update_link_status(hdev);
3196
3197         return hdev->hw.mac.link;
3198 }
3199
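/* Translate a user-visible VF index into its vport entry; returns NULL
 * when SRIOV is disabled or the index is out of range.
 */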
3200 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3201 {
3202         if (!pci_num_vf(hdev->pdev)) {
3203                 dev_err(&hdev->pdev->dev,
3204                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3205                 return NULL;
3206         }
3207
3208         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3209                 dev_err(&hdev->pdev->dev,
3210                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3211                         vf, pci_num_vf(hdev->pdev));
3212                 return NULL;
3213         }
3214
3215         /* VFs start from 1 in vport */
3216         vf += HCLGE_VF_VPORT_START_NUM;
3217         return &hdev->vport[vf];
3218 }
3219
3220 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3221                                struct ifla_vf_info *ivf)
3222 {
3223         struct hclge_vport *vport = hclge_get_vport(handle);
3224         struct hclge_dev *hdev = vport->back;
3225
3226         vport = hclge_get_vf_vport(hdev, vf);
3227         if (!vport)
3228                 return -EINVAL;
3229
3230         ivf->vf = vf;
3231         ivf->linkstate = vport->vf_info.link_state;
3232         ivf->spoofchk = vport->vf_info.spoofchk;
3233         ivf->trusted = vport->vf_info.trusted;
3234         ivf->min_tx_rate = 0;
3235         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3236         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3237         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3238         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3239         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3240
3241         return 0;
3242 }
3243
3244 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3245                                    int link_state)
3246 {
3247         struct hclge_vport *vport = hclge_get_vport(handle);
3248         struct hclge_dev *hdev = vport->back;
3249
3250         vport = hclge_get_vf_vport(hdev, vf);
3251         if (!vport)
3252                 return -EINVAL;
3253
3254         vport->vf_info.link_state = link_state;
3255
3256         return 0;
3257 }
3258
3259 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3260 {
3261         u32 cmdq_src_reg, msix_src_reg;
3262
3263         /* fetch the events from their corresponding regs */
3264         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3265         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3266
3267         /* Assumption: if by any chance reset and mailbox events are reported
3268          * together, we will only process the reset event in this go and defer
3269          * the processing of the mailbox events. Since we have not cleared the
3270          * RX CMDQ event this time, we will receive another interrupt from H/W
3271          * just for the mailbox.
3272          *
3273          * Check for vector0 reset event sources.
3274          */
3275         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3276                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3277                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3278                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3279                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3280                 hdev->rst_stats.imp_rst_cnt++;
3281                 return HCLGE_VECTOR0_EVENT_RST;
3282         }
3283
3284         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3285                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3286                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3287                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3288                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3289                 hdev->rst_stats.global_rst_cnt++;
3290                 return HCLGE_VECTOR0_EVENT_RST;
3291         }
3292
3293         /* check for vector0 msix event source */
3294         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3295                 *clearval = msix_src_reg;
3296                 return HCLGE_VECTOR0_EVENT_ERR;
3297         }
3298
3299         /* check for vector0 mailbox(=CMDQ RX) event source */
3300         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3301                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3302                 *clearval = cmdq_src_reg;
3303                 return HCLGE_VECTOR0_EVENT_MBX;
3304         }
3305
3306         /* print other vector0 event source */
3307         dev_info(&hdev->pdev->dev,
3308                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3309                  cmdq_src_reg, msix_src_reg);
3310         *clearval = msix_src_reg;
3311
3312         return HCLGE_VECTOR0_EVENT_OTHER;
3313 }
3314
3315 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3316                                     u32 regclr)
3317 {
3318         switch (event_type) {
3319         case HCLGE_VECTOR0_EVENT_RST:
3320                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3321                 break;
3322         case HCLGE_VECTOR0_EVENT_MBX:
3323                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3324                 break;
3325         default:
3326                 break;
3327         }
3328 }
3329
3330 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3331 {
3332         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3333                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3334                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3335                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3336         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3337 }
3338
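/* Unmask (enable) or mask the misc (vector 0) interrupt by writing 1 or
 * 0 to the vector's control address.
 */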
3339 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3340 {
3341         writel(enable ? 1 : 0, vector->addr);
3342 }
3343
3344 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3345 {
3346         struct hclge_dev *hdev = data;
3347         u32 clearval = 0;
3348         u32 event_cause;
3349
3350         hclge_enable_vector(&hdev->misc_vector, false);
3351         event_cause = hclge_check_event_cause(hdev, &clearval);
3352
3353         /* vector 0 interrupt is shared with reset and mailbox source events. */
3354         switch (event_cause) {
3355         case HCLGE_VECTOR0_EVENT_ERR:
3356                 /* We do not know what type of reset is required now. This can
3357                  * only be decided after we fetch the type of errors which
3358                  * caused this event. Therefore, we do the following for now:
3359                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3360                  *    have deferred the choice of reset type.
3361                  * 2. Schedule the reset service task.
3362                  * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3363                  *    will fetch the correct type of reset by first decoding
3364                  *    the types of errors.
3365                  */
3366                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3367                 fallthrough;
3368         case HCLGE_VECTOR0_EVENT_RST:
3369                 hclge_reset_task_schedule(hdev);
3370                 break;
3371         case HCLGE_VECTOR0_EVENT_MBX:
3372                 /* If we are here, then either:
3373                  * 1. we are not handling any mbx task and we are not
3374                  *    scheduled as well,
3375                  *                        OR
3376                  * 2. we could be handling a mbx task but nothing more is
3377                  *    scheduled.
3378                  * In both cases, we should schedule the mbx task as there are
3379                  * more mbx messages reported by this interrupt.
3380                  */
3381                 hclge_mbx_task_schedule(hdev);
3382                 break;
3383         default:
3384                 dev_warn(&hdev->pdev->dev,
3385                          "received unknown or unhandled event of vector0\n");
3386                 break;
3387         }
3388
3389         hclge_clear_event_cause(hdev, event_cause, clearval);
3390
3391         /* Enable the interrupt if it is not caused by reset. When
3392          * clearval is equal to 0, the interrupt status may have been
3393          * cleared by hardware before the driver read the status register.
3394          * In this case, the vector0 interrupt should also be enabled.
3395          */
3396         if (!clearval ||
3397             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3398                 hclge_enable_vector(&hdev->misc_vector, true);
3399         }
3400
3401         return IRQ_HANDLED;
3402 }
3403
3404 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3405 {
3406         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3407                 dev_warn(&hdev->pdev->dev,
3408                          "vector(vector_id %d) has been freed.\n", vector_id);
3409                 return;
3410         }
3411
3412         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3413         hdev->num_msi_left += 1;
3414         hdev->num_msi_used -= 1;
3415 }
3416
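/* MSI-X vector 0 is reserved for the misc interrupt: record its irq
 * number and register address and account for it in the MSI counters.
 */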
3417 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3418 {
3419         struct hclge_misc_vector *vector = &hdev->misc_vector;
3420
3421         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3422
3423         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3424         hdev->vector_status[0] = 0;
3425
3426         hdev->num_msi_left -= 1;
3427         hdev->num_msi_used += 1;
3428 }
3429
3430 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3431                                       const cpumask_t *mask)
3432 {
3433         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3434                                               affinity_notify);
3435
3436         cpumask_copy(&hdev->affinity_mask, mask);
3437 }
3438
3439 static void hclge_irq_affinity_release(struct kref *ref)
3440 {
3441 }
3442
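/* Pin the misc interrupt to the configured affinity mask and register a
 * notifier so hdev->affinity_mask follows later affinity changes.
 */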
3443 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3444 {
3445         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3446                               &hdev->affinity_mask);
3447
3448         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3449         hdev->affinity_notify.release = hclge_irq_affinity_release;
3450         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3451                                   &hdev->affinity_notify);
3452 }
3453
3454 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3455 {
3456         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3457         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3458 }
3459
3460 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3461 {
3462         int ret;
3463
3464         hclge_get_misc_vector(hdev);
3465
3466         /* this irq is explicitly freed in hclge_misc_irq_uninit() */
3467         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3468                  HCLGE_NAME, pci_name(hdev->pdev));
3469         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3470                           0, hdev->misc_vector.name, hdev);
3471         if (ret) {
3472                 hclge_free_vector(hdev, 0);
3473                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3474                         hdev->misc_vector.vector_irq);
3475         }
3476
3477         return ret;
3478 }
3479
3480 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3481 {
3482         free_irq(hdev->misc_vector.vector_irq, hdev);
3483         hclge_free_vector(hdev, 0);
3484 }
3485
3486 int hclge_notify_client(struct hclge_dev *hdev,
3487                         enum hnae3_reset_notify_type type)
3488 {
3489         struct hnae3_handle *handle = &hdev->vport[0].nic;
3490         struct hnae3_client *client = hdev->nic_client;
3491         int ret;
3492
3493         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3494                 return 0;
3495
3496         if (!client->ops->reset_notify)
3497                 return -EOPNOTSUPP;
3498
3499         ret = client->ops->reset_notify(handle, type);
3500         if (ret)
3501                 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3502                         type, ret);
3503
3504         return ret;
3505 }
3506
3507 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3508                                     enum hnae3_reset_notify_type type)
3509 {
3510         struct hnae3_handle *handle = &hdev->vport[0].roce;
3511         struct hnae3_client *client = hdev->roce_client;
3512         int ret;
3513
3514         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3515                 return 0;
3516
3517         if (!client->ops->reset_notify)
3518                 return -EOPNOTSUPP;
3519
3520         ret = client->ops->reset_notify(handle, type);
3521         if (ret)
3522                 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3523                         type, ret);
3524
3525         return ret;
3526 }
3527
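/* Poll the reset status register matching the current reset type until
 * hardware clears the in-progress bit, checking every 100 ms for up to
 * HCLGE_RESET_WAIT_CNT attempts (35 seconds in total).
 */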
3528 static int hclge_reset_wait(struct hclge_dev *hdev)
3529 {
3530 #define HCLGE_RESET_WAIT_MS     100
3531 #define HCLGE_RESET_WAIT_CNT    350
3532
3533         u32 val, reg, reg_bit;
3534         u32 cnt = 0;
3535
3536         switch (hdev->reset_type) {
3537         case HNAE3_IMP_RESET:
3538                 reg = HCLGE_GLOBAL_RESET_REG;
3539                 reg_bit = HCLGE_IMP_RESET_BIT;
3540                 break;
3541         case HNAE3_GLOBAL_RESET:
3542                 reg = HCLGE_GLOBAL_RESET_REG;
3543                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3544                 break;
3545         case HNAE3_FUNC_RESET:
3546                 reg = HCLGE_FUN_RST_ING;
3547                 reg_bit = HCLGE_FUN_RST_ING_B;
3548                 break;
3549         default:
3550                 dev_err(&hdev->pdev->dev,
3551                         "Wait for unsupported reset type: %d\n",
3552                         hdev->reset_type);
3553                 return -EINVAL;
3554         }
3555
3556         val = hclge_read_dev(&hdev->hw, reg);
3557         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3558                 msleep(HCLGE_RESET_WAIT_MS);
3559                 val = hclge_read_dev(&hdev->hw, reg);
3560                 cnt++;
3561         }
3562
3563         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3564                 dev_warn(&hdev->pdev->dev,
3565                          "Wait for reset timeout: %d\n", hdev->reset_type);
3566                 return -EBUSY;
3567         }
3568
3569         return 0;
3570 }
3571
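/* Ask firmware to set or clear the function-reset pending flag for the
 * VF identified by func_id.
 */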
3572 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3573 {
3574         struct hclge_vf_rst_cmd *req;
3575         struct hclge_desc desc;
3576
3577         req = (struct hclge_vf_rst_cmd *)desc.data;
3578         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3579         req->dest_vfid = func_id;
3580
3581         if (reset)
3582                 req->vf_rst = 0x1;
3583
3584         return hclge_cmd_send(&hdev->hw, &desc, 1);
3585 }
3586
3587 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3588 {
3589         int i;
3590
3591         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3592                 struct hclge_vport *vport = &hdev->vport[i];
3593                 int ret;
3594
3595                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3596                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3597                 if (ret) {
3598                         dev_err(&hdev->pdev->dev,
3599                                 "set vf(%u) rst failed %d!\n",
3600                                 vport->vport_id, ret);
3601                         return ret;
3602                 }
3603
3604                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3605                         continue;
3606
3607                 /* Inform VF to process the reset.
3608                  * hclge_inform_reset_assert_to_vf may fail if VF
3609                  * driver is not loaded.
3610                  */
3611                 ret = hclge_inform_reset_assert_to_vf(vport);
3612                 if (ret)
3613                         dev_warn(&hdev->pdev->dev,
3614                                  "inform reset to vf(%u) failed %d!\n",
3615                                  vport->vport_id, ret);
3616         }
3617
3618         return 0;
3619 }
3620
3621 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3622 {
3623         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3624             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3625             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3626                 return;
3627
3628         hclge_mbx_handler(hdev);
3629
3630         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3631 }
3632
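/* Before a PF or FLR reset is asserted, poll firmware until all VFs
 * report ready (i.e. have stopped IO), servicing mailbox requests in
 * between so that VFs can bring their netdevs down.
 */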
3633 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3634 {
3635         struct hclge_pf_rst_sync_cmd *req;
3636         struct hclge_desc desc;
3637         int cnt = 0;
3638         int ret;
3639
3640         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3641         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3642
3643         do {
3644                 /* VF needs to down its netdev by mbx during PF or FLR reset */
3645                 hclge_mailbox_service_task(hdev);
3646
3647                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3648                 /* for compatibility with old firmware, wait
3649                  * 100 ms for the VF to stop IO
3650                  */
3651                 if (ret == -EOPNOTSUPP) {
3652                         msleep(HCLGE_RESET_SYNC_TIME);
3653                         return;
3654                 } else if (ret) {
3655                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3656                                  ret);
3657                         return;
3658                 } else if (req->all_vf_ready) {
3659                         return;
3660                 }
3661                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3662                 hclge_cmd_reuse_desc(&desc, true);
3663         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3664
3665         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3666 }
3667
3668 void hclge_report_hw_error(struct hclge_dev *hdev,
3669                            enum hnae3_hw_error_type type)
3670 {
3671         struct hnae3_client *client = hdev->nic_client;
3672
3673         if (!client || !client->ops->process_hw_error ||
3674             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3675                 return;
3676
3677         client->ops->process_hw_error(&hdev->vport[0].nic, type);
3678 }
3679
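/* Report IMP RD-poison and CMDQ ECC errors signalled in the PF
 * other-interrupt register to the NIC client and acknowledge them by
 * writing the register back with the corresponding bits cleared.
 */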
3680 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3681 {
3682         u32 reg_val;
3683
3684         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3685         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3686                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3687                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3688                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3689         }
3690
3691         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3692                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3693                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3694                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3695         }
3696 }
3697
3698 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3699 {
3700         struct hclge_desc desc;
3701         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3702         int ret;
3703
3704         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3705         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3706         req->fun_reset_vfid = func_id;
3707
3708         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3709         if (ret)
3710                 dev_err(&hdev->pdev->dev,
3711                         "send function reset cmd fail, status =%d\n", ret);
3712
3713         return ret;
3714 }
3715
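/* Trigger the requested reset: a global reset is asserted directly via
 * the global reset register, while a PF (function) reset is recorded as
 * pending and handled by the reset task. If a hardware reset is still in
 * progress, nothing is done.
 */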
3716 static void hclge_do_reset(struct hclge_dev *hdev)
3717 {
3718         struct hnae3_handle *handle = &hdev->vport[0].nic;
3719         struct pci_dev *pdev = hdev->pdev;
3720         u32 val;
3721
3722         if (hclge_get_hw_reset_stat(handle)) {
3723                 dev_info(&pdev->dev, "hardware reset not finish\n");
3724                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3725                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3726                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3727                 return;
3728         }
3729
3730         switch (hdev->reset_type) {
3731         case HNAE3_GLOBAL_RESET:
3732                 dev_info(&pdev->dev, "global reset requested\n");
3733                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3734                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3735                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3736                 break;
3737         case HNAE3_FUNC_RESET:
3738                 dev_info(&pdev->dev, "PF reset requested\n");
3739                 /* schedule again to check later */
3740                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3741                 hclge_reset_task_schedule(hdev);
3742                 break;
3743         default:
3744                 dev_warn(&pdev->dev,
3745                          "unsupported reset type: %d\n", hdev->reset_type);
3746                 break;
3747         }
3748 }
3749
3750 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3751                                                    unsigned long *addr)
3752 {
3753         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3754         struct hclge_dev *hdev = ae_dev->priv;
3755
3756         /* first, resolve any unknown reset type to the known type(s) */
3757         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3758                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3759                                         HCLGE_MISC_VECTOR_INT_STS);
3760                 /* we will intentionally ignore any errors from this function
3761                  * as we will end up in *some* reset request in any case
3762                  */
3763                 if (hclge_handle_hw_msix_error(hdev, addr))
3764                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3765                                  msix_sts_reg);
3766
3767                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3768                 /* We deferred the clearing of the error event which caused the
3769                  * interrupt since it was not possible to do that in interrupt
3770                  * context (and this is the reason we introduced the new UNKNOWN
3771                  * reset type). Now that the errors have been handled and cleared
3772                  * in hardware, we can safely enable interrupts. This is an
3773                  * exception to the norm.
3774                  */
3775                 hclge_enable_vector(&hdev->misc_vector, true);
3776         }
3777
3778         /* return the highest priority reset level amongst all */
3779         if (test_bit(HNAE3_IMP_RESET, addr)) {
3780                 rst_level = HNAE3_IMP_RESET;
3781                 clear_bit(HNAE3_IMP_RESET, addr);
3782                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3783                 clear_bit(HNAE3_FUNC_RESET, addr);
3784         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3785                 rst_level = HNAE3_GLOBAL_RESET;
3786                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3787                 clear_bit(HNAE3_FUNC_RESET, addr);
3788         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3789                 rst_level = HNAE3_FUNC_RESET;
3790                 clear_bit(HNAE3_FUNC_RESET, addr);
3791         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3792                 rst_level = HNAE3_FLR_RESET;
3793                 clear_bit(HNAE3_FLR_RESET, addr);
3794         }
3795
3796         if (hdev->reset_type != HNAE3_NONE_RESET &&
3797             rst_level < hdev->reset_type)
3798                 return HNAE3_NONE_RESET;
3799
3800         return rst_level;
3801 }
3802
3803 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3804 {
3805         u32 clearval = 0;
3806
3807         switch (hdev->reset_type) {
3808         case HNAE3_IMP_RESET:
3809                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3810                 break;
3811         case HNAE3_GLOBAL_RESET:
3812                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3813                 break;
3814         default:
3815                 break;
3816         }
3817
3818         if (!clearval)
3819                 return;
3820
3821         /* For revision 0x20, the reset interrupt source
3822          * can only be cleared after the hardware reset is done
3823          */
3824         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3825                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3826                                 clearval);
3827
3828         hclge_enable_vector(&hdev->misc_vector, true);
3829 }
3830
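/* Set or clear the software reset-ready bit in the NIC CSQ depth
 * register; this is the handshake telling hardware whether the driver
 * has finished its preparatory (or re-initialization) work.
 */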
3831 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3832 {
3833         u32 reg_val;
3834
3835         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3836         if (enable)
3837                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3838         else
3839                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3840
3841         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3842 }
3843
3844 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3845 {
3846         int ret;
3847
3848         ret = hclge_set_all_vf_rst(hdev, true);
3849         if (ret)
3850                 return ret;
3851
3852         hclge_func_reset_sync_vf(hdev);
3853
3854         return 0;
3855 }
3856
3857 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3858 {
3859         u32 reg_val;
3860         int ret = 0;
3861
3862         switch (hdev->reset_type) {
3863         case HNAE3_FUNC_RESET:
3864                 ret = hclge_func_reset_notify_vf(hdev);
3865                 if (ret)
3866                         return ret;
3867
3868                 ret = hclge_func_reset_cmd(hdev, 0);
3869                 if (ret) {
3870                         dev_err(&hdev->pdev->dev,
3871                                 "asserting function reset fail %d!\n", ret);
3872                         return ret;
3873                 }
3874
3875                 /* After performing the pf reset, it is not necessary to do any
3876                  * mailbox handling or send any command to firmware, because
3877                  * such handling or commands are only valid after
3878                  * hclge_cmd_init is called.
3879                  */
3880                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3881                 hdev->rst_stats.pf_rst_cnt++;
3882                 break;
3883         case HNAE3_FLR_RESET:
3884                 ret = hclge_func_reset_notify_vf(hdev);
3885                 if (ret)
3886                         return ret;
3887                 break;
3888         case HNAE3_IMP_RESET:
3889                 hclge_handle_imp_error(hdev);
3890                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3891                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3892                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3893                 break;
3894         default:
3895                 break;
3896         }
3897
3898         /* inform hardware that preparatory work is done */
3899         msleep(HCLGE_RESET_SYNC_TIME);
3900         hclge_reset_handshake(hdev, true);
3901         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3902
3903         return ret;
3904 }
3905
3906 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3907 {
3908 #define MAX_RESET_FAIL_CNT 5
3909
3910         if (hdev->reset_pending) {
3911                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3912                          hdev->reset_pending);
3913                 return true;
3914         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3915                    HCLGE_RESET_INT_M) {
3916                 dev_info(&hdev->pdev->dev,
3917                          "reset failed because of new reset interrupt\n");
3918                 hclge_clear_reset_cause(hdev);
3919                 return false;
3920         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3921                 hdev->rst_stats.reset_fail_cnt++;
3922                 set_bit(hdev->reset_type, &hdev->reset_pending);
3923                 dev_info(&hdev->pdev->dev,
3924                          "re-schedule reset task(%u)\n",
3925                          hdev->rst_stats.reset_fail_cnt);
3926                 return true;
3927         }
3928
3929         hclge_clear_reset_cause(hdev);
3930
3931         /* recover the handshake status when the reset fails */
3932         hclge_reset_handshake(hdev, true);
3933
3934         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3935
3936         hclge_dbg_dump_rst_info(hdev);
3937
3938         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3939
3940         return false;
3941 }
3942
3943 static void hclge_update_reset_level(struct hclge_dev *hdev)
3944 {
3945         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3946         enum hnae3_reset_type reset_level;
3947
3948         /* if default_reset_request has a higher level reset request,
3949          * it should be handled as soon as possible, since some errors
3950          * need this kind of reset to be fixed.
3951          */
3952         reset_level = hclge_get_reset_level(ae_dev,
3953                                             &hdev->default_reset_request);
3954         if (reset_level != HNAE3_NONE_RESET)
3955                 set_bit(reset_level, &hdev->reset_request);
3956 }
3957
3958 static int hclge_set_rst_done(struct hclge_dev *hdev)
3959 {
3960         struct hclge_pf_rst_done_cmd *req;
3961         struct hclge_desc desc;
3962         int ret;
3963
3964         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3965         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3966         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3967
3968         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3969         /* To be compatible with the old firmware, which does not support
3970          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3971          * return success
3972          */
3973         if (ret == -EOPNOTSUPP) {
3974                 dev_warn(&hdev->pdev->dev,
3975                          "current firmware does not support command(0x%x)!\n",
3976                          HCLGE_OPC_PF_RST_DONE);
3977                 return 0;
3978         } else if (ret) {
3979                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3980                         ret);
3981         }
3982
3983         return ret;
3984 }
3985
3986 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3987 {
3988         int ret = 0;
3989
3990         switch (hdev->reset_type) {
3991         case HNAE3_FUNC_RESET:
3992         case HNAE3_FLR_RESET:
3993                 ret = hclge_set_all_vf_rst(hdev, false);
3994                 break;
3995         case HNAE3_GLOBAL_RESET:
3996         case HNAE3_IMP_RESET:
3997                 ret = hclge_set_rst_done(hdev);
3998                 break;
3999         default:
4000                 break;
4001         }
4002
4003         /* clear the handshake status after re-initialization is done */
4004         hclge_reset_handshake(hdev, false);
4005
4006         return ret;
4007 }
4008
4009 static int hclge_reset_stack(struct hclge_dev *hdev)
4010 {
4011         int ret;
4012
4013         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4014         if (ret)
4015                 return ret;
4016
4017         ret = hclge_reset_ae_dev(hdev->ae_dev);
4018         if (ret)
4019                 return ret;
4020
4021         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4022 }
4023
4024 static int hclge_reset_prepare(struct hclge_dev *hdev)
4025 {
4026         int ret;
4027
4028         hdev->rst_stats.reset_cnt++;
4029         /* perform reset of the stack & ae device for a client */
4030         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4031         if (ret)
4032                 return ret;
4033
4034         rtnl_lock();
4035         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4036         rtnl_unlock();
4037         if (ret)
4038                 return ret;
4039
4040         return hclge_reset_prepare_wait(hdev);
4041 }
4042
4043 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4044 {
4045         int ret;
4046
4047         hdev->rst_stats.hw_reset_done_cnt++;
4048
4049         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4050         if (ret)
4051                 return ret;
4052
4053         rtnl_lock();
4054         ret = hclge_reset_stack(hdev);
4055         rtnl_unlock();
4056         if (ret)
4057                 return ret;
4058
4059         hclge_clear_reset_cause(hdev);
4060
4061         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4062         /* ignore the RoCE notify error if the reset has already failed
4063          * HCLGE_RESET_MAX_FAIL_CNT - 1 times, so the rebuild can continue
4064          */
4065         if (ret &&
4066             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4067                 return ret;
4068
4069         ret = hclge_reset_prepare_up(hdev);
4070         if (ret)
4071                 return ret;
4072
4073         rtnl_lock();
4074         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4075         rtnl_unlock();
4076         if (ret)
4077                 return ret;
4078
4079         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4080         if (ret)
4081                 return ret;
4082
4083         hdev->last_reset_time = jiffies;
4084         hdev->rst_stats.reset_fail_cnt = 0;
4085         hdev->rst_stats.reset_done_cnt++;
4086         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4087
4088         hclge_update_reset_level(hdev);
4089
4090         return 0;
4091 }
4092
4093 static void hclge_reset(struct hclge_dev *hdev)
4094 {
4095         if (hclge_reset_prepare(hdev))
4096                 goto err_reset;
4097
4098         if (hclge_reset_wait(hdev))
4099                 goto err_reset;
4100
4101         if (hclge_reset_rebuild(hdev))
4102                 goto err_reset;
4103
4104         return;
4105
4106 err_reset:
4107         if (hclge_reset_err_handle(hdev))
4108                 hclge_reset_task_schedule(hdev);
4109 }
4110
4111 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4112 {
4113         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4114         struct hclge_dev *hdev = ae_dev->priv;
4115
4116         /* We might end up getting called broadly because of the two cases below:
4117          * 1. A recoverable error was conveyed through APEI and the only way to
4118          *    bring back normalcy is to reset.
4119          * 2. A new reset request from the stack due to timeout
4120          *
4121          * For the first case, the error event might not have an ae handle
4122          * available. Check if this is a new reset request and we are not here
4123          * just because the last reset attempt did not succeed and the watchdog
4124          * hit us again. We will know this if the last reset request did not
4125          * occur very recently (watchdog timer = 5*HZ, let us check after a
4126          * sufficiently large time, say 4*5*HZ). In case of a new request we
4127          * reset the "reset level" to PF reset. And if it is a repeat reset
4128          * request of the most recent one, then we want to make sure we throttle
4129          * the reset request. Therefore, we will not allow it again within 3*HZ.
4130          */
4131         if (!handle)
4132                 handle = &hdev->vport[0].nic;
4133
4134         if (time_before(jiffies, (hdev->last_reset_time +
4135                                   HCLGE_RESET_INTERVAL))) {
4136                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4137                 return;
4138         } else if (hdev->default_reset_request) {
4139                 hdev->reset_level =
4140                         hclge_get_reset_level(ae_dev,
4141                                               &hdev->default_reset_request);
4142         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4143                 hdev->reset_level = HNAE3_FUNC_RESET;
4144         }
4145
4146         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4147                  hdev->reset_level);
4148
4149         /* request reset & schedule reset task */
4150         set_bit(hdev->reset_level, &hdev->reset_request);
4151         hclge_reset_task_schedule(hdev);
4152
4153         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4154                 hdev->reset_level++;
4155 }
4156
4157 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4158                                         enum hnae3_reset_type rst_type)
4159 {
4160         struct hclge_dev *hdev = ae_dev->priv;
4161
4162         set_bit(rst_type, &hdev->default_reset_request);
4163 }
4164
4165 static void hclge_reset_timer(struct timer_list *t)
4166 {
4167         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4168
4169         /* if default_reset_request has no value, it means that this reset
4170          * request has already been handled, so just return here
4171          */
4172         if (!hdev->default_reset_request)
4173                 return;
4174
4175         dev_info(&hdev->pdev->dev,
4176                  "triggering reset in reset timer\n");
4177         hclge_reset_event(hdev->pdev, NULL);
4178 }
4179
4180 static void hclge_reset_subtask(struct hclge_dev *hdev)
4181 {
4182         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4183
4184         /* check if there is any ongoing reset in the hardware. This status can
4185          * be checked from reset_pending. If there is, we need to wait for the
4186          * hardware to complete the reset.
4187          *    a. If we are able to figure out in a reasonable time that the
4188          *       hardware has fully reset, we can proceed with the driver and
4189          *       client reset.
4190          *    b. else, we can come back later to check this status, so
4191          *       re-schedule now.
4192          */
4193         hdev->last_reset_time = jiffies;
4194         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4195         if (hdev->reset_type != HNAE3_NONE_RESET)
4196                 hclge_reset(hdev);
4197
4198         /* check if we got any *new* reset requests to be honored */
4199         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4200         if (hdev->reset_type != HNAE3_NONE_RESET)
4201                 hclge_do_reset(hdev);
4202
4203         hdev->reset_type = HNAE3_NONE_RESET;
4204 }
4205
4206 static void hclge_reset_service_task(struct hclge_dev *hdev)
4207 {
4208         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4209                 return;
4210
4211         down(&hdev->reset_sem);
4212         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4213
4214         hclge_reset_subtask(hdev);
4215
4216         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4217         up(&hdev->reset_sem);
4218 }
4219
4220 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4221 {
4222         int i;
4223
4224         /* start from vport 1, because the PF is always alive */
4225         for (i = 1; i < hdev->num_alloc_vport; i++) {
4226                 struct hclge_vport *vport = &hdev->vport[i];
4227
4228                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4229                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4230
4231                 /* If the VF is not alive, reset its MPS to the default value */
4232                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4233                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4234         }
4235 }
4236
4237 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4238 {
4239         unsigned long delta = round_jiffies_relative(HZ);
4240
4241         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4242                 return;
4243
4244         /* Always handle the link updating to make sure link state is
4245          * updated when it is triggered by mbx.
4246          */
4247         hclge_update_link_status(hdev);
4248         hclge_sync_mac_table(hdev);
4249         hclge_sync_promisc_mode(hdev);
4250         hclge_sync_fd_table(hdev);
4251
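	/* if the last full pass ran less than a second ago, skip the heavier
	 * periodic work below and reschedule for the remaining time
	 */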
4252         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4253                 delta = jiffies - hdev->last_serv_processed;
4254
4255                 if (delta < round_jiffies_relative(HZ)) {
4256                         delta = round_jiffies_relative(HZ) - delta;
4257                         goto out;
4258                 }
4259         }
4260
4261         hdev->serv_processed_cnt++;
4262         hclge_update_vport_alive(hdev);
4263
4264         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4265                 hdev->last_serv_processed = jiffies;
4266                 goto out;
4267         }
4268
4269         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4270                 hclge_update_stats_for_all(hdev);
4271
4272         hclge_update_port_info(hdev);
4273         hclge_sync_vlan_filter(hdev);
4274
4275         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4276                 hclge_rfs_filter_expire(hdev);
4277
4278         hdev->last_serv_processed = jiffies;
4279
4280 out:
4281         hclge_task_schedule(hdev, delta);
4282 }
4283
4284 static void hclge_service_task(struct work_struct *work)
4285 {
4286         struct hclge_dev *hdev =
4287                 container_of(work, struct hclge_dev, service_task.work);
4288
4289         hclge_reset_service_task(hdev);
4290         hclge_mailbox_service_task(hdev);
4291         hclge_periodic_service_task(hdev);
4292
4293         /* Handle reset and mbx again in case periodical task delays the
4294          * handling by calling hclge_task_schedule() in
4295          * hclge_periodic_service_task().
4296          */
4297         hclge_reset_service_task(hdev);
4298         hclge_mailbox_service_task(hdev);
4299 }
4300
4301 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4302 {
4303         /* VF handle has no client */
4304         if (!handle->client)
4305                 return container_of(handle, struct hclge_vport, nic);
4306         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4307                 return container_of(handle, struct hclge_vport, roce);
4308         else
4309                 return container_of(handle, struct hclge_vport, nic);
4310 }
4311
4312 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4313                                   struct hnae3_vector_info *vector_info)
4314 {
4315 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4316
4317         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4318
4319         /* need an extended offset to configure vectors >= 64 */
4320         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4321                 vector_info->io_addr = hdev->hw.io_base +
4322                                 HCLGE_VECTOR_REG_BASE +
4323                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4324         else
4325                 vector_info->io_addr = hdev->hw.io_base +
4326                                 HCLGE_VECTOR_EXT_REG_BASE +
4327                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4328                                 HCLGE_VECTOR_REG_OFFSET_H +
4329                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4330                                 HCLGE_VECTOR_REG_OFFSET;
4331
4332         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4333         hdev->vector_irq[idx] = vector_info->vector;
4334 }
4335
4336 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4337                             struct hnae3_vector_info *vector_info)
4338 {
4339         struct hclge_vport *vport = hclge_get_vport(handle);
4340         struct hnae3_vector_info *vector = vector_info;
4341         struct hclge_dev *hdev = vport->back;
4342         int alloc = 0;
4343         u16 i = 0;
4344         u16 j;
4345
4346         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4347         vector_num = min(hdev->num_msi_left, vector_num);
4348
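	/* scan the vector table and hand out unused entries until vector_num
	 * vectors have been filled in or the NIC vector range is exhausted
	 */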
4349         for (j = 0; j < vector_num; j++) {
4350                 while (++i < hdev->num_nic_msi) {
4351                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4352                                 hclge_get_vector_info(hdev, i, vector);
4353                                 vector++;
4354                                 alloc++;
4355
4356                                 break;
4357                         }
4358                 }
4359         }
4360         hdev->num_msi_left -= alloc;
4361         hdev->num_msi_used += alloc;
4362
4363         return alloc;
4364 }
4365
4366 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4367 {
4368         int i;
4369
4370         for (i = 0; i < hdev->num_msi; i++)
4371                 if (vector == hdev->vector_irq[i])
4372                         return i;
4373
4374         return -EINVAL;
4375 }
4376
4377 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4378 {
4379         struct hclge_vport *vport = hclge_get_vport(handle);
4380         struct hclge_dev *hdev = vport->back;
4381         int vector_id;
4382
4383         vector_id = hclge_get_vector_index(hdev, vector);
4384         if (vector_id < 0) {
4385                 dev_err(&hdev->pdev->dev,
4386                         "Get vector index fail. vector = %d\n", vector);
4387                 return vector_id;
4388         }
4389
4390         hclge_free_vector(hdev, vector_id);
4391
4392         return 0;
4393 }
4394
4395 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4396 {
4397         return HCLGE_RSS_KEY_SIZE;
4398 }
4399
4400 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4401                                   const u8 hfunc, const u8 *key)
4402 {
4403         struct hclge_rss_config_cmd *req;
4404         unsigned int key_offset = 0;
4405         struct hclge_desc desc;
4406         int key_counts;
4407         int key_size;
4408         int ret;
4409
4410         key_counts = HCLGE_RSS_KEY_SIZE;
4411         req = (struct hclge_rss_config_cmd *)desc.data;
4412
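	/* the RSS key may span multiple descriptors, so write it in chunks of
	 * at most HCLGE_RSS_HASH_KEY_NUM bytes, bumping key_offset for each
	 * command sent
	 */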
4413         while (key_counts) {
4414                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4415                                            false);
4416
4417                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4418                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4419
4420                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4421                 memcpy(req->hash_key,
4422                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4423
4424                 key_counts -= key_size;
4425                 key_offset++;
4426                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4427                 if (ret) {
4428                         dev_err(&hdev->pdev->dev,
4429                                 "Configure RSS config fail, status = %d\n",
4430                                 ret);
4431                         return ret;
4432                 }
4433         }
4434         return 0;
4435 }
4436
4437 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4438 {
4439         struct hclge_rss_indirection_table_cmd *req;
4440         struct hclge_desc desc;
4441         int rss_cfg_tbl_num;
4442         u8 rss_msb_oft;
4443         u8 rss_msb_val;
4444         int ret;
4445         u16 qid;
4446         int i;
4447         u32 j;
4448
4449         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4450         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4451                           HCLGE_RSS_CFG_TBL_SIZE;
4452
4453         for (i = 0; i < rss_cfg_tbl_num; i++) {
4454                 hclge_cmd_setup_basic_desc
4455                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4456
4457                 req->start_table_index =
4458                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4459                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
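		/* the low byte of each queue id goes into rss_qid_l; its higher
		 * bit (bit HCLGE_RSS_CFG_TBL_BW_L) is packed, one bit per
		 * entry, into the rss_qid_h bitmap
		 */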
4460                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4461                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4462                         req->rss_qid_l[j] = qid & 0xff;
4463                         rss_msb_oft =
4464                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4465                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4466                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4467                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4468                 }
4469                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4470                 if (ret) {
4471                         dev_err(&hdev->pdev->dev,
4472                                 "Configure rss indir table fail,status = %d\n",
4473                                 ret);
4474                         return ret;
4475                 }
4476         }
4477         return 0;
4478 }
4479
4480 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4481                                  u16 *tc_size, u16 *tc_offset)
4482 {
4483         struct hclge_rss_tc_mode_cmd *req;
4484         struct hclge_desc desc;
4485         int ret;
4486         int i;
4487
4488         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4489         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4490
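	/* each TC is described by one 16-bit mode word: a valid bit, the
	 * rss size stored as a log2 value (with its MSB carried in a
	 * separate bit) and the queue offset of the TC
	 */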
4491         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4492                 u16 mode = 0;
4493
4494                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4495                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4496                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4497                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4498                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4499                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4500                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4501
4502                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4503         }
4504
4505         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4506         if (ret)
4507                 dev_err(&hdev->pdev->dev,
4508                         "Configure rss tc mode fail, status = %d\n", ret);
4509
4510         return ret;
4511 }
4512
4513 static void hclge_get_rss_type(struct hclge_vport *vport)
4514 {
4515         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4516             vport->rss_tuple_sets.ipv4_udp_en ||
4517             vport->rss_tuple_sets.ipv4_sctp_en ||
4518             vport->rss_tuple_sets.ipv6_tcp_en ||
4519             vport->rss_tuple_sets.ipv6_udp_en ||
4520             vport->rss_tuple_sets.ipv6_sctp_en)
4521                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4522         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4523                  vport->rss_tuple_sets.ipv6_fragment_en)
4524                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4525         else
4526                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4527 }
4528
4529 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4530 {
4531         struct hclge_rss_input_tuple_cmd *req;
4532         struct hclge_desc desc;
4533         int ret;
4534
4535         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4536
4537         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4538
4539         /* Get the tuple cfg from pf */
4540         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4541         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4542         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4543         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4544         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4545         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4546         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4547         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4548         hclge_get_rss_type(&hdev->vport[0]);
4549         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4550         if (ret)
4551                 dev_err(&hdev->pdev->dev,
4552                         "Configure rss input fail, status = %d\n", ret);
4553         return ret;
4554 }
4555
4556 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4557                          u8 *key, u8 *hfunc)
4558 {
4559         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4560         struct hclge_vport *vport = hclge_get_vport(handle);
4561         int i;
4562
4563         /* Get hash algorithm */
4564         if (hfunc) {
4565                 switch (vport->rss_algo) {
4566                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4567                         *hfunc = ETH_RSS_HASH_TOP;
4568                         break;
4569                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4570                         *hfunc = ETH_RSS_HASH_XOR;
4571                         break;
4572                 default:
4573                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4574                         break;
4575                 }
4576         }
4577
4578         /* Get the RSS Key required by the user */
4579         if (key)
4580                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4581
4582         /* Get indirect table */
4583         if (indir)
4584                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4585                         indir[i] =  vport->rss_indirection_tbl[i];
4586
4587         return 0;
4588 }
4589
4590 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4591                          const  u8 *key, const  u8 hfunc)
4592 {
4593         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4594         struct hclge_vport *vport = hclge_get_vport(handle);
4595         struct hclge_dev *hdev = vport->back;
4596         u8 hash_algo;
4597         int ret, i;
4598
4599         /* Set the RSS Hash Key if specified by the user */
4600         if (key) {
4601                 switch (hfunc) {
4602                 case ETH_RSS_HASH_TOP:
4603                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4604                         break;
4605                 case ETH_RSS_HASH_XOR:
4606                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4607                         break;
4608                 case ETH_RSS_HASH_NO_CHANGE:
4609                         hash_algo = vport->rss_algo;
4610                         break;
4611                 default:
4612                         return -EINVAL;
4613                 }
4614
4615                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4616                 if (ret)
4617                         return ret;
4618
4619                 /* Update the shadow RSS key and algorithm with the user-specified values */
4620                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4621                 vport->rss_algo = hash_algo;
4622         }
4623
4624         /* Update the shadow RSS table with user specified qids */
4625         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4626                 vport->rss_indirection_tbl[i] = indir[i];
4627
4628         /* Update the hardware */
4629         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4630 }
4631
4632 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4633 {
4634         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4635
4636         if (nfc->data & RXH_L4_B_2_3)
4637                 hash_sets |= HCLGE_D_PORT_BIT;
4638         else
4639                 hash_sets &= ~HCLGE_D_PORT_BIT;
4640
4641         if (nfc->data & RXH_IP_SRC)
4642                 hash_sets |= HCLGE_S_IP_BIT;
4643         else
4644                 hash_sets &= ~HCLGE_S_IP_BIT;
4645
4646         if (nfc->data & RXH_IP_DST)
4647                 hash_sets |= HCLGE_D_IP_BIT;
4648         else
4649                 hash_sets &= ~HCLGE_D_IP_BIT;
4650
4651         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4652                 hash_sets |= HCLGE_V_TAG_BIT;
4653
4654         return hash_sets;
4655 }
4656
4657 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4658                                     struct ethtool_rxnfc *nfc,
4659                                     struct hclge_rss_input_tuple_cmd *req)
4660 {
4661         struct hclge_dev *hdev = vport->back;
4662         u8 tuple_sets;
4663
4664         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4665         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4666         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4667         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4668         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4669         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4670         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4671         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4672
4673         tuple_sets = hclge_get_rss_hash_bits(nfc);
4674         switch (nfc->flow_type) {
4675         case TCP_V4_FLOW:
4676                 req->ipv4_tcp_en = tuple_sets;
4677                 break;
4678         case TCP_V6_FLOW:
4679                 req->ipv6_tcp_en = tuple_sets;
4680                 break;
4681         case UDP_V4_FLOW:
4682                 req->ipv4_udp_en = tuple_sets;
4683                 break;
4684         case UDP_V6_FLOW:
4685                 req->ipv6_udp_en = tuple_sets;
4686                 break;
4687         case SCTP_V4_FLOW:
4688                 req->ipv4_sctp_en = tuple_sets;
4689                 break;
4690         case SCTP_V6_FLOW:
4691                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4692                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4693                         return -EINVAL;
4694
4695                 req->ipv6_sctp_en = tuple_sets;
4696                 break;
4697         case IPV4_FLOW:
4698                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4699                 break;
4700         case IPV6_FLOW:
4701                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4702                 break;
4703         default:
4704                 return -EINVAL;
4705         }
4706
4707         return 0;
4708 }
4709
4710 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4711                                struct ethtool_rxnfc *nfc)
4712 {
4713         struct hclge_vport *vport = hclge_get_vport(handle);
4714         struct hclge_dev *hdev = vport->back;
4715         struct hclge_rss_input_tuple_cmd *req;
4716         struct hclge_desc desc;
4717         int ret;
4718
4719         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4720                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4721                 return -EINVAL;
4722
4723         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4725
4726         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4727         if (ret) {
4728                 dev_err(&hdev->pdev->dev,
4729                         "failed to init rss tuple cmd, ret = %d\n", ret);
4730                 return ret;
4731         }
4732
4733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4734         if (ret) {
4735                 dev_err(&hdev->pdev->dev,
4736                         "Set rss tuple fail, status = %d\n", ret);
4737                 return ret;
4738         }
4739
4740         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4741         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4742         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4743         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4744         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4745         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4746         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4747         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4748         hclge_get_rss_type(vport);
4749         return 0;
4750 }
4751
4752 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4753                                      u8 *tuple_sets)
4754 {
4755         switch (flow_type) {
4756         case TCP_V4_FLOW:
4757                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4758                 break;
4759         case UDP_V4_FLOW:
4760                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4761                 break;
4762         case TCP_V6_FLOW:
4763                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4764                 break;
4765         case UDP_V6_FLOW:
4766                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4767                 break;
4768         case SCTP_V4_FLOW:
4769                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4770                 break;
4771         case SCTP_V6_FLOW:
4772                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4773                 break;
4774         case IPV4_FLOW:
4775         case IPV6_FLOW:
4776                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4777                 break;
4778         default:
4779                 return -EINVAL;
4780         }
4781
4782         return 0;
4783 }
4784
4785 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4786 {
4787         u64 tuple_data = 0;
4788
4789         if (tuple_sets & HCLGE_D_PORT_BIT)
4790                 tuple_data |= RXH_L4_B_2_3;
4791         if (tuple_sets & HCLGE_S_PORT_BIT)
4792                 tuple_data |= RXH_L4_B_0_1;
4793         if (tuple_sets & HCLGE_D_IP_BIT)
4794                 tuple_data |= RXH_IP_DST;
4795         if (tuple_sets & HCLGE_S_IP_BIT)
4796                 tuple_data |= RXH_IP_SRC;
4797
4798         return tuple_data;
4799 }
4800
4801 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4802                                struct ethtool_rxnfc *nfc)
4803 {
4804         struct hclge_vport *vport = hclge_get_vport(handle);
4805         u8 tuple_sets;
4806         int ret;
4807
4808         nfc->data = 0;
4809
4810         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4811         if (ret || !tuple_sets)
4812                 return ret;
4813
4814         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4815
4816         return 0;
4817 }
4818
4819 static int hclge_get_tc_size(struct hnae3_handle *handle)
4820 {
4821         struct hclge_vport *vport = hclge_get_vport(handle);
4822         struct hclge_dev *hdev = vport->back;
4823
4824         return hdev->pf_rss_size_max;
4825 }
4826
4827 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4828 {
4829         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4830         struct hclge_vport *vport = hdev->vport;
4831         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4832         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4833         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4834         struct hnae3_tc_info *tc_info;
4835         u16 roundup_size;
4836         u16 rss_size;
4837         int i;
4838
4839         tc_info = &vport->nic.kinfo.tc_info;
4840         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4841                 rss_size = tc_info->tqp_count[i];
4842                 tc_valid[i] = 0;
4843
4844                 if (!(hdev->hw_tc_map & BIT(i)))
4845                         continue;
4846
4847                 /* the tc_size written to hardware is the log2 of rss_size
4848                  * rounded up to a power of two; the actual queue size is
4849                  * limited by the indirection table.
4850                  */
4851                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4852                     rss_size == 0) {
4853                         dev_err(&hdev->pdev->dev,
4854                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4855                                 rss_size);
4856                         return -EINVAL;
4857                 }
4858
4859                 roundup_size = roundup_pow_of_two(rss_size);
4860                 roundup_size = ilog2(roundup_size);
4861
4862                 tc_valid[i] = 1;
4863                 tc_size[i] = roundup_size;
4864                 tc_offset[i] = tc_info->tqp_offset[i];
4865         }
4866
4867         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4868 }
4869
4870 int hclge_rss_init_hw(struct hclge_dev *hdev)
4871 {
4872         struct hclge_vport *vport = hdev->vport;
4873         u16 *rss_indir = vport[0].rss_indirection_tbl;
4874         u8 *key = vport[0].rss_hash_key;
4875         u8 hfunc = vport[0].rss_algo;
4876         int ret;
4877
4878         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4879         if (ret)
4880                 return ret;
4881
4882         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4883         if (ret)
4884                 return ret;
4885
4886         ret = hclge_set_rss_input_tuple(hdev);
4887         if (ret)
4888                 return ret;
4889
4890         return hclge_init_rss_tc_mode(hdev);
4891 }
4892
4893 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4894 {
4895         struct hclge_vport *vport = &hdev->vport[0];
4896         int i;
4897
4898         for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4899                 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4900 }
4901
4902 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4903 {
4904         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4905         int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4906         struct hclge_vport *vport = &hdev->vport[0];
4907         u16 *rss_ind_tbl;
4908
4909         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4910                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4911
4912         vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4913         vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4914         vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4915         vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4916         vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4917         vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
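	/* devices at or below DEVICE_VERSION_V2 cannot include the L4 ports
	 * in the IPv6 SCTP tuple (see hclge_init_rss_tuple_cmd), so use the
	 * no-port tuple set for them
	 */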
4918         vport->rss_tuple_sets.ipv6_sctp_en =
4919                 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4920                 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4921                 HCLGE_RSS_INPUT_TUPLE_SCTP;
4922         vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4923
4924         vport->rss_algo = rss_algo;
4925
4926         rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4927                                    sizeof(*rss_ind_tbl), GFP_KERNEL);
4928         if (!rss_ind_tbl)
4929                 return -ENOMEM;
4930
4931         vport->rss_indirection_tbl = rss_ind_tbl;
4932         memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4933
4934         hclge_rss_indir_init_cfg(hdev);
4935
4936         return 0;
4937 }
4938
4939 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4940                                 int vector_id, bool en,
4941                                 struct hnae3_ring_chain_node *ring_chain)
4942 {
4943         struct hclge_dev *hdev = vport->back;
4944         struct hnae3_ring_chain_node *node;
4945         struct hclge_desc desc;
4946         struct hclge_ctrl_vector_chain_cmd *req =
4947                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4948         enum hclge_cmd_status status;
4949         enum hclge_opcode_type op;
4950         u16 tqp_type_and_id;
4951         int i;
4952
4953         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4954         hclge_cmd_setup_basic_desc(&desc, op, false);
4955         req->int_vector_id_l = hnae3_get_field(vector_id,
4956                                                HCLGE_VECTOR_ID_L_M,
4957                                                HCLGE_VECTOR_ID_L_S);
4958         req->int_vector_id_h = hnae3_get_field(vector_id,
4959                                                HCLGE_VECTOR_ID_H_M,
4960                                                HCLGE_VECTOR_ID_H_S);
4961
4962         i = 0;
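	/* walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
	 * ring entries into each descriptor; whenever one fills up, send it
	 * and start a fresh descriptor for the remaining rings
	 */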
4963         for (node = ring_chain; node; node = node->next) {
4964                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4965                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4966                                 HCLGE_INT_TYPE_S,
4967                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4968                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4969                                 HCLGE_TQP_ID_S, node->tqp_index);
4970                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4971                                 HCLGE_INT_GL_IDX_S,
4972                                 hnae3_get_field(node->int_gl_idx,
4973                                                 HNAE3_RING_GL_IDX_M,
4974                                                 HNAE3_RING_GL_IDX_S));
4975                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4976                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4977                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4978                         req->vfid = vport->vport_id;
4979
4980                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4981                         if (status) {
4982                                 dev_err(&hdev->pdev->dev,
4983                                         "Map TQP fail, status is %d.\n",
4984                                         status);
4985                                 return -EIO;
4986                         }
4987                         i = 0;
4988
4989                         hclge_cmd_setup_basic_desc(&desc,
4990                                                    op,
4991                                                    false);
4992                         req->int_vector_id_l =
4993                                 hnae3_get_field(vector_id,
4994                                                 HCLGE_VECTOR_ID_L_M,
4995                                                 HCLGE_VECTOR_ID_L_S);
4996                         req->int_vector_id_h =
4997                                 hnae3_get_field(vector_id,
4998                                                 HCLGE_VECTOR_ID_H_M,
4999                                                 HCLGE_VECTOR_ID_H_S);
5000                 }
5001         }
5002
5003         if (i > 0) {
5004                 req->int_cause_num = i;
5005                 req->vfid = vport->vport_id;
5006                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5007                 if (status) {
5008                         dev_err(&hdev->pdev->dev,
5009                                 "Map TQP fail, status is %d.\n", status);
5010                         return -EIO;
5011                 }
5012         }
5013
5014         return 0;
5015 }
5016
5017 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5018                                     struct hnae3_ring_chain_node *ring_chain)
5019 {
5020         struct hclge_vport *vport = hclge_get_vport(handle);
5021         struct hclge_dev *hdev = vport->back;
5022         int vector_id;
5023
5024         vector_id = hclge_get_vector_index(hdev, vector);
5025         if (vector_id < 0) {
5026                 dev_err(&hdev->pdev->dev,
5027                         "failed to get vector index. vector=%d\n", vector);
5028                 return vector_id;
5029         }
5030
5031         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5032 }
5033
5034 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5035                                        struct hnae3_ring_chain_node *ring_chain)
5036 {
5037         struct hclge_vport *vport = hclge_get_vport(handle);
5038         struct hclge_dev *hdev = vport->back;
5039         int vector_id, ret;
5040
5041         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5042                 return 0;
5043
5044         vector_id = hclge_get_vector_index(hdev, vector);
5045         if (vector_id < 0) {
5046                 dev_err(&handle->pdev->dev,
5047                         "Get vector index fail. ret =%d\n", vector_id);
5048                 return vector_id;
5049         }
5050
5051         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5052         if (ret)
5053                 dev_err(&handle->pdev->dev,
5054                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5055                         vector_id, ret);
5056
5057         return ret;
5058 }
5059
5060 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5061                                       bool en_uc, bool en_mc, bool en_bc)
5062 {
5063         struct hclge_vport *vport = &hdev->vport[vf_id];
5064         struct hnae3_handle *handle = &vport->nic;
5065         struct hclge_promisc_cfg_cmd *req;
5066         struct hclge_desc desc;
5067         bool uc_tx_en = en_uc;
5068         u8 promisc_cfg = 0;
5069         int ret;
5070
5071         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5072
5073         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5074         req->vf_id = vf_id;
5075
5076         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5077                 uc_tx_en = false;
5078
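	/* extend_promisc carries separate RX and TX enables; TX unicast
	 * promisc is suppressed when the limit-promisc private flag is set
	 */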
5079         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5080         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5081         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5082         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5083         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5084         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5085         req->extend_promisc = promisc_cfg;
5086
5087         /* to be compatible with DEVICE_VERSION_V1/2 */
5088         promisc_cfg = 0;
5089         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5090         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5091         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5092         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5093         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5094         req->promisc = promisc_cfg;
5095
5096         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5097         if (ret)
5098                 dev_err(&hdev->pdev->dev,
5099                         "failed to set vport %u promisc mode, ret = %d.\n",
5100                         vf_id, ret);
5101
5102         return ret;
5103 }
5104
5105 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5106                                  bool en_mc_pmc, bool en_bc_pmc)
5107 {
5108         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5109                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5110 }
5111
5112 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5113                                   bool en_mc_pmc)
5114 {
5115         struct hclge_vport *vport = hclge_get_vport(handle);
5116         struct hclge_dev *hdev = vport->back;
5117         bool en_bc_pmc = true;
5118
5119         /* For devices whose version is below V2, if broadcast promisc is
5120          * enabled, the vlan filter is always bypassed. So broadcast promisc
5121          * should stay disabled until the user enables promisc mode.
5122          */
5123         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5124                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5125
5126         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5127                                             en_bc_pmc);
5128 }
5129
5130 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5131 {
5132         struct hclge_vport *vport = hclge_get_vport(handle);
5133         struct hclge_dev *hdev = vport->back;
5134
5135         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5136 }
5137
5138 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5139 {
5140         if (hlist_empty(&hdev->fd_rule_list))
5141                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5142 }
5143
5144 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5145 {
5146         if (!test_bit(location, hdev->fd_bmap)) {
5147                 set_bit(location, hdev->fd_bmap);
5148                 hdev->hclge_fd_rule_num++;
5149         }
5150 }
5151
5152 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5153 {
5154         if (test_bit(location, hdev->fd_bmap)) {
5155                 clear_bit(location, hdev->fd_bmap);
5156                 hdev->hclge_fd_rule_num--;
5157         }
5158 }
5159
5160 static void hclge_fd_free_node(struct hclge_dev *hdev,
5161                                struct hclge_fd_rule *rule)
5162 {
5163         hlist_del(&rule->rule_node);
5164         kfree(rule);
5165         hclge_sync_fd_state(hdev);
5166 }
5167
5168 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5169                                       struct hclge_fd_rule *old_rule,
5170                                       struct hclge_fd_rule *new_rule,
5171                                       enum HCLGE_FD_NODE_STATE state)
5172 {
5173         switch (state) {
5174         case HCLGE_FD_TO_ADD:
5175         case HCLGE_FD_ACTIVE:
5176                 /* 1) if the new state is TO_ADD, just replace the old rule
5177                  * with the same location, no matter its state, because the
5178                  * new rule will be configured to the hardware.
5179                  * 2) if the new state is ACTIVE, it means the new rule
5180                  * has been configured to the hardware, so just replace
5181                  * the old rule node with the same location.
5182                  * 3) since neither case adds a new node to the list, there is
5183                  * no need to update the rule number and fd_bmap.
5184                  */
5185                 new_rule->rule_node.next = old_rule->rule_node.next;
5186                 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5187                 memcpy(old_rule, new_rule, sizeof(*old_rule));
5188                 kfree(new_rule);
5189                 break;
5190         case HCLGE_FD_DELETED:
5191                 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5192                 hclge_fd_free_node(hdev, old_rule);
5193                 break;
5194         case HCLGE_FD_TO_DEL:
5195                 /* if the new request is TO_DEL and an old rule exists:
5196                  * 1) if the state of the old rule is TO_DEL, nothing needs to
5197                  * be done, because we delete the rule by location and the
5198                  * other rule contents are irrelevant.
5199                  * 2) if the state of the old rule is ACTIVE, change its state
5200                  * to TO_DEL, so the rule will be deleted when the periodic
5201                  * task is scheduled.
5202                  * 3) if the state of the old rule is TO_ADD, the rule has not
5203                  * been added to the hardware yet, so just delete the rule node
5204                  * from fd_rule_list directly.
5205                  */
5206                 if (old_rule->state == HCLGE_FD_TO_ADD) {
5207                         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5208                         hclge_fd_free_node(hdev, old_rule);
5209                         return;
5210                 }
5211                 old_rule->state = HCLGE_FD_TO_DEL;
5212                 break;
5213         }
5214 }
5215
5216 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5217                                                 u16 location,
5218                                                 struct hclge_fd_rule **parent)
5219 {
5220         struct hclge_fd_rule *rule;
5221         struct hlist_node *node;
5222
5223         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5224                 if (rule->location == location)
5225                         return rule;
5226                 else if (rule->location > location)
5227                         return NULL;
5228                 /* record the parent node, used to keep the nodes in
5229                  * fd_rule_list in ascending order.
5230                  */
5231                 *parent = rule;
5232         }
5233
5234         return NULL;
5235 }
5236
5237 /* insert fd rule node in ascending order according to rule->location */
5238 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5239                                       struct hclge_fd_rule *rule,
5240                                       struct hclge_fd_rule *parent)
5241 {
5242         INIT_HLIST_NODE(&rule->rule_node);
5243
5244         if (parent)
5245                 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5246         else
5247                 hlist_add_head(&rule->rule_node, hlist);
5248 }
5249
5250 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5251                                      struct hclge_fd_user_def_cfg *cfg)
5252 {
5253         struct hclge_fd_user_def_cfg_cmd *req;
5254         struct hclge_desc desc;
5255         u16 data = 0;
5256         int ret;
5257
5258         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5259
5260         req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5261
5262         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5263         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5264                         HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5265         req->ol2_cfg = cpu_to_le16(data);
5266
5267         data = 0;
5268         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5269         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5270                         HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5271         req->ol3_cfg = cpu_to_le16(data);
5272
5273         data = 0;
5274         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5275         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5276                         HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5277         req->ol4_cfg = cpu_to_le16(data);
5278
5279         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5280         if (ret)
5281                 dev_err(&hdev->pdev->dev,
5282                         "failed to set fd user def data, ret= %d\n", ret);
5283         return ret;
5284 }
5285
5286 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5287 {
5288         int ret;
5289
5290         if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5291                 return;
5292
5293         if (!locked)
5294                 spin_lock_bh(&hdev->fd_rule_lock);
5295
5296         ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5297         if (ret)
5298                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5299
5300         if (!locked)
5301                 spin_unlock_bh(&hdev->fd_rule_lock);
5302 }
5303
5304 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5305                                           struct hclge_fd_rule *rule)
5306 {
5307         struct hlist_head *hlist = &hdev->fd_rule_list;
5308         struct hclge_fd_rule *fd_rule, *parent = NULL;
5309         struct hclge_fd_user_def_info *info, *old_info;
5310         struct hclge_fd_user_def_cfg *cfg;
5311
5312         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5313             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5314                 return 0;
5315
5316         /* valid layers start from 1, so subtract 1 to index the cfg array */
5317         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5318         info = &rule->ep.user_def;
5319
5320         if (!cfg->ref_cnt || cfg->offset == info->offset)
5321                 return 0;
5322
5323         if (cfg->ref_cnt > 1)
5324                 goto error;
5325
5326         fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5327         if (fd_rule) {
5328                 old_info = &fd_rule->ep.user_def;
5329                 if (info->layer == old_info->layer)
5330                         return 0;
5331         }
5332
5333 error:
5334         dev_err(&hdev->pdev->dev,
5335                 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5336                 info->layer + 1);
5337         return -ENOSPC;
5338 }
5339
5340 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5341                                          struct hclge_fd_rule *rule)
5342 {
5343         struct hclge_fd_user_def_cfg *cfg;
5344
5345         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5346             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5347                 return;
5348
5349         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5350         if (!cfg->ref_cnt) {
5351                 cfg->offset = rule->ep.user_def.offset;
5352                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5353         }
5354         cfg->ref_cnt++;
5355 }
5356
5357 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5358                                          struct hclge_fd_rule *rule)
5359 {
5360         struct hclge_fd_user_def_cfg *cfg;
5361
5362         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5363             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5364                 return;
5365
5366         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5367         if (!cfg->ref_cnt)
5368                 return;
5369
5370         cfg->ref_cnt--;
5371         if (!cfg->ref_cnt) {
5372                 cfg->offset = 0;
5373                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5374         }
5375 }
5376
5377 static void hclge_update_fd_list(struct hclge_dev *hdev,
5378                                  enum HCLGE_FD_NODE_STATE state, u16 location,
5379                                  struct hclge_fd_rule *new_rule)
5380 {
5381         struct hlist_head *hlist = &hdev->fd_rule_list;
5382         struct hclge_fd_rule *fd_rule, *parent = NULL;
5383
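	/* if a rule already exists at this location, adjust the user-def
	 * refcounts and update the existing node; otherwise insert a new
	 * node and, for HCLGE_FD_TO_ADD, schedule the service task to push
	 * it to hardware
	 */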
5384         fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5385         if (fd_rule) {
5386                 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5387                 if (state == HCLGE_FD_ACTIVE)
5388                         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5389                 hclge_sync_fd_user_def_cfg(hdev, true);
5390
5391                 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5392                 return;
5393         }
5394
5395         /* it's unlikely to fail here, because we have already checked that
5396          * the rule exists.
5397          */
5398         if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5399                 dev_warn(&hdev->pdev->dev,
5400                          "failed to delete fd rule %u, it's inexistent\n",
5401                          location);
5402                 return;
5403         }
5404
5405         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5406         hclge_sync_fd_user_def_cfg(hdev, true);
5407
5408         hclge_fd_insert_rule_node(hlist, new_rule, parent);
5409         hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5410
5411         if (state == HCLGE_FD_TO_ADD) {
5412                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5413                 hclge_task_schedule(hdev, 0);
5414         }
5415 }
5416
5417 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5418 {
5419         struct hclge_get_fd_mode_cmd *req;
5420         struct hclge_desc desc;
5421         int ret;
5422
5423         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5424
5425         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5426
5427         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5428         if (ret) {
5429                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5430                 return ret;
5431         }
5432
5433         *fd_mode = req->mode;
5434
5435         return ret;
5436 }
5437
5438 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5439                                    u32 *stage1_entry_num,
5440                                    u32 *stage2_entry_num,
5441                                    u16 *stage1_counter_num,
5442                                    u16 *stage2_counter_num)
5443 {
5444         struct hclge_get_fd_allocation_cmd *req;
5445         struct hclge_desc desc;
5446         int ret;
5447
5448         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5449
5450         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5451
5452         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5453         if (ret) {
5454                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5455                         ret);
5456                 return ret;
5457         }
5458
5459         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5460         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5461         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5462         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5463
5464         return ret;
5465 }
5466
5467 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5468                                    enum HCLGE_FD_STAGE stage_num)
5469 {
5470         struct hclge_set_fd_key_config_cmd *req;
5471         struct hclge_fd_key_cfg *stage;
5472         struct hclge_desc desc;
5473         int ret;
5474
5475         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5476
5477         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5478         stage = &hdev->fd_cfg.key_cfg[stage_num];
5479         req->stage = stage_num;
5480         req->key_select = stage->key_sel;
5481         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5482         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5483         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5484         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5485         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5486         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5487
5488         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5489         if (ret)
5490                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5491
5492         return ret;
5493 }
5494
5495 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5496 {
5497         struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5498
5499         spin_lock_bh(&hdev->fd_rule_lock);
5500         memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5501         spin_unlock_bh(&hdev->fd_rule_lock);
5502
5503         hclge_fd_set_user_def_cmd(hdev, cfg);
5504 }
5505
5506 static int hclge_init_fd_config(struct hclge_dev *hdev)
5507 {
5508 #define LOW_2_WORDS             0x03
5509         struct hclge_fd_key_cfg *key_cfg;
5510         int ret;
5511
5512         if (!hnae3_dev_fd_supported(hdev))
5513                 return 0;
5514
5515         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5516         if (ret)
5517                 return ret;
5518
5519         switch (hdev->fd_cfg.fd_mode) {
5520         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5521                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5522                 break;
5523         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5524                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5525                 break;
5526         default:
5527                 dev_err(&hdev->pdev->dev,
5528                         "Unsupported flow director mode %u\n",
5529                         hdev->fd_cfg.fd_mode);
5530                 return -EOPNOTSUPP;
5531         }
5532
5533         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5534         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5535         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5536         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5537         key_cfg->outer_sipv6_word_en = 0;
5538         key_cfg->outer_dipv6_word_en = 0;
5539
5540         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5541                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5542                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5543                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5544
5545         /* If the max 400-bit key is used, we can support tuples for ether type */
5546         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5547                 key_cfg->tuple_active |=
5548                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5549                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5550                         key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5551         }
5552
5553         /* roce_type is used to filter roce frames
5554          * dst_vport is used to specify the rule
5555          */
5556         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5557
5558         ret = hclge_get_fd_allocation(hdev,
5559                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5560                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5561                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5562                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5563         if (ret)
5564                 return ret;
5565
5566         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5567 }
5568
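     /* Write or invalidate one flow director TCAM entry. The key is split
      * across three command descriptors, and entry_vld is only set when
      * programming the x part of an entry that is being added.
      */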
5569 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5570                                 int loc, u8 *key, bool is_add)
5571 {
5572         struct hclge_fd_tcam_config_1_cmd *req1;
5573         struct hclge_fd_tcam_config_2_cmd *req2;
5574         struct hclge_fd_tcam_config_3_cmd *req3;
5575         struct hclge_desc desc[3];
5576         int ret;
5577
5578         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5579         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5580         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5581         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5582         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5583
5584         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5585         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5586         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5587
5588         req1->stage = stage;
5589         req1->xy_sel = sel_x ? 1 : 0;
5590         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5591         req1->index = cpu_to_le32(loc);
5592         req1->entry_vld = sel_x ? is_add : 0;
5593
5594         if (key) {
5595                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5596                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5597                        sizeof(req2->tcam_data));
5598                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5599                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5600         }
5601
5602         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5603         if (ret)
5604                 dev_err(&hdev->pdev->dev,
5605                         "config tcam key fail, ret=%d\n",
5606                         ret);
5607
5608         return ret;
5609 }
5610
5611 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5612                               struct hclge_fd_ad_data *action)
5613 {
5614         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5615         struct hclge_fd_ad_config_cmd *req;
5616         struct hclge_desc desc;
5617         u64 ad_data = 0;
5618         int ret;
5619
5620         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5621
5622         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5623         req->index = cpu_to_le32(loc);
5624         req->stage = stage;
5625
5626         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5627                       action->write_rule_id_to_bd);
5628         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5629                         action->rule_id);
5630         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5631                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5632                               action->override_tc);
5633                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5634                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5635         }
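             /* The rule id fields set above occupy the high 32 bits of
              * ad_data after this shift; the action fields below fill
              * the low 32 bits.
              */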
5636         ad_data <<= 32;
5637         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5638         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5639                       action->forward_to_direct_queue);
5640         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5641                         action->queue_id);
5642         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5643         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5644                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5645         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5646         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5647                         action->next_input_key);
5648
5649         req->ad_data = cpu_to_le64(ad_data);
5650         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5651         if (ret)
5652                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5653
5654         return ret;
5655 }
5656
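     /* Convert one tuple of the rule into its key_x/key_y TCAM form.
      * Returns true if the tuple occupies space in the key (including
      * unused tuples, whose key bytes stay zeroed), false for an
      * unrecognized key option, in which case the tuple takes no space.
      */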
5657 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5658                                    struct hclge_fd_rule *rule)
5659 {
5660         int offset, moffset, ip_offset;
5661         enum HCLGE_FD_KEY_OPT key_opt;
5662         u16 tmp_x_s, tmp_y_s;
5663         u32 tmp_x_l, tmp_y_l;
5664         u8 *p = (u8 *)rule;
5665         int i;
5666
5667         if (rule->unused_tuple & BIT(tuple_bit))
5668                 return true;
5669
5670         key_opt = tuple_key_info[tuple_bit].key_opt;
5671         offset = tuple_key_info[tuple_bit].offset;
5672         moffset = tuple_key_info[tuple_bit].moffset;
5673
5674         switch (key_opt) {
5675         case KEY_OPT_U8:
5676                 calc_x(*key_x, p[offset], p[moffset]);
5677                 calc_y(*key_y, p[offset], p[moffset]);
5678
5679                 return true;
5680         case KEY_OPT_LE16:
5681                 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5682                 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5683                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5684                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5685
5686                 return true;
5687         case KEY_OPT_LE32:
5688                 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5689                 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5690                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5691                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5692
5693                 return true;
5694         case KEY_OPT_MAC:
5695                 for (i = 0; i < ETH_ALEN; i++) {
5696                         calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5697                                p[moffset + i]);
5698                         calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5699                                p[moffset + i]);
5700                 }
5701
5702                 return true;
5703         case KEY_OPT_IP:
5704                 ip_offset = IPV4_INDEX * sizeof(u32);
5705                 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5706                        *(u32 *)(&p[moffset + ip_offset]));
5707                 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5708                        *(u32 *)(&p[moffset + ip_offset]));
5709                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5710                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5711
5712                 return true;
5713         default:
5714                 return false;
5715         }
5716 }
5717
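     /* Pack a port identifier in the format used by the DST_VPORT meta
      * data field: pf id and vf id for a host port, or the network port
      * id for a network port, plus the port type bit.
      */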
5718 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5719                                  u8 vf_id, u8 network_port_id)
5720 {
5721         u32 port_number = 0;
5722
5723         if (port_type == HOST_PORT) {
5724                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5725                                 pf_id);
5726                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5727                                 vf_id);
5728                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5729         } else {
5730                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5731                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5732                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5733         }
5734
5735         return port_number;
5736 }
5737
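     /* Build the meta data part of the TCAM key: walk the active meta
      * data fields, pack the packet type and the destination vport, then
      * convert the result to key_x/key_y and left-align it in the 32-bit
      * meta data word.
      */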
5738 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5739                                        __le32 *key_x, __le32 *key_y,
5740                                        struct hclge_fd_rule *rule)
5741 {
5742         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5743         u8 cur_pos = 0, tuple_size, shift_bits;
5744         unsigned int i;
5745
5746         for (i = 0; i < MAX_META_DATA; i++) {
5747                 tuple_size = meta_data_key_info[i].key_length;
5748                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5749
5750                 switch (tuple_bit) {
5751                 case BIT(ROCE_TYPE):
5752                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5753                         cur_pos += tuple_size;
5754                         break;
5755                 case BIT(DST_VPORT):
5756                         port_number = hclge_get_port_number(HOST_PORT, 0,
5757                                                             rule->vf_id, 0);
5758                         hnae3_set_field(meta_data,
5759                                         GENMASK(cur_pos + tuple_size, cur_pos),
5760                                         cur_pos, port_number);
5761                         cur_pos += tuple_size;
5762                         break;
5763                 default:
5764                         break;
5765                 }
5766         }
5767
5768         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5769         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5770         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5771
5772         *key_x = cpu_to_le32(tmp_x << shift_bits);
5773         *key_y = cpu_to_le32(tmp_y << shift_bits);
5774 }
5775
5776 /* A complete key is a combination of the meta data key and the tuple key.
5777  * The meta data key is stored in the MSB region, the tuple key in the LSB
5778  * region, and unused bits are filled with 0.
5779  */
5780 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5781                             struct hclge_fd_rule *rule)
5782 {
5783         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5784         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5785         u8 *cur_key_x, *cur_key_y;
5786         u8 meta_data_region;
5787         u8 tuple_size;
5788         int ret;
5789         u32 i;
5790
5791         memset(key_x, 0, sizeof(key_x));
5792         memset(key_y, 0, sizeof(key_y));
5793         cur_key_x = key_x;
5794         cur_key_y = key_y;
5795
5796         for (i = 0; i < MAX_TUPLE; i++) {
5797                 bool tuple_valid;
5798
5799                 tuple_size = tuple_key_info[i].key_length / 8;
5800                 if (!(key_cfg->tuple_active & BIT(i)))
5801                         continue;
5802
5803                 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5804                                                      cur_key_y, rule);
5805                 if (tuple_valid) {
5806                         cur_key_x += tuple_size;
5807                         cur_key_y += tuple_size;
5808                 }
5809         }
5810
5811         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5812                         MAX_META_DATA_LENGTH / 8;
5813
5814         hclge_fd_convert_meta_data(key_cfg,
5815                                    (__le32 *)(key_x + meta_data_region),
5816                                    (__le32 *)(key_y + meta_data_region),
5817                                    rule);
5818
5819         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5820                                    true);
5821         if (ret) {
5822                 dev_err(&hdev->pdev->dev,
5823                         "fd key_y config fail, loc=%u, ret=%d\n",
5824                         rule->location, ret);
5825                 return ret;
5826         }
5827
5828         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5829                                    true);
5830         if (ret)
5831                 dev_err(&hdev->pdev->dev,
5832                         "fd key_x config fail, loc=%u, ret=%d\n",
5833                         rule->location, ret);
5834         return ret;
5835 }
5836
5837 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5838                                struct hclge_fd_rule *rule)
5839 {
5840         struct hclge_vport *vport = hdev->vport;
5841         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5842         struct hclge_fd_ad_data ad_data;
5843
5844         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5845         ad_data.ad_id = rule->location;
5846
5847         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5848                 ad_data.drop_packet = true;
5849         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5850                 ad_data.override_tc = true;
5851                 ad_data.queue_id =
5852                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5853                 ad_data.tc_size =
5854                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5855         } else {
5856                 ad_data.forward_to_direct_queue = true;
5857                 ad_data.queue_id = rule->queue_id;
5858         }
5859
5860         ad_data.use_counter = false;
5861         ad_data.counter_id = 0;
5862
5863         ad_data.use_next_stage = false;
5864         ad_data.next_input_key = 0;
5865
5866         ad_data.write_rule_id_to_bd = true;
5867         ad_data.rule_id = rule->location;
5868
5869         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5870 }
5871
5872 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5873                                        u32 *unused_tuple)
5874 {
5875         if (!spec || !unused_tuple)
5876                 return -EINVAL;
5877
5878         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5879
5880         if (!spec->ip4src)
5881                 *unused_tuple |= BIT(INNER_SRC_IP);
5882
5883         if (!spec->ip4dst)
5884                 *unused_tuple |= BIT(INNER_DST_IP);
5885
5886         if (!spec->psrc)
5887                 *unused_tuple |= BIT(INNER_SRC_PORT);
5888
5889         if (!spec->pdst)
5890                 *unused_tuple |= BIT(INNER_DST_PORT);
5891
5892         if (!spec->tos)
5893                 *unused_tuple |= BIT(INNER_IP_TOS);
5894
5895         return 0;
5896 }
5897
5898 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5899                                     u32 *unused_tuple)
5900 {
5901         if (!spec || !unused_tuple)
5902                 return -EINVAL;
5903
5904         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5905                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5906
5907         if (!spec->ip4src)
5908                 *unused_tuple |= BIT(INNER_SRC_IP);
5909
5910         if (!spec->ip4dst)
5911                 *unused_tuple |= BIT(INNER_DST_IP);
5912
5913         if (!spec->tos)
5914                 *unused_tuple |= BIT(INNER_IP_TOS);
5915
5916         if (!spec->proto)
5917                 *unused_tuple |= BIT(INNER_IP_PROTO);
5918
5919         if (spec->l4_4_bytes)
5920                 return -EOPNOTSUPP;
5921
5922         if (spec->ip_ver != ETH_RX_NFC_IP4)
5923                 return -EOPNOTSUPP;
5924
5925         return 0;
5926 }
5927
5928 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5929                                        u32 *unused_tuple)
5930 {
5931         if (!spec || !unused_tuple)
5932                 return -EINVAL;
5933
5934         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5935
5936         /* check whether the src/dst ip addresses are used */
5937         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5938                 *unused_tuple |= BIT(INNER_SRC_IP);
5939
5940         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5941                 *unused_tuple |= BIT(INNER_DST_IP);
5942
5943         if (!spec->psrc)
5944                 *unused_tuple |= BIT(INNER_SRC_PORT);
5945
5946         if (!spec->pdst)
5947                 *unused_tuple |= BIT(INNER_DST_PORT);
5948
5949         if (!spec->tclass)
5950                 *unused_tuple |= BIT(INNER_IP_TOS);
5951
5952         return 0;
5953 }
5954
5955 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5956                                     u32 *unused_tuple)
5957 {
5958         if (!spec || !unused_tuple)
5959                 return -EINVAL;
5960
5961         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5962                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5963
5964         /* check whether the src/dst ip addresses are used */
5965         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5966                 *unused_tuple |= BIT(INNER_SRC_IP);
5967
5968         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5969                 *unused_tuple |= BIT(INNER_DST_IP);
5970
5971         if (!spec->l4_proto)
5972                 *unused_tuple |= BIT(INNER_IP_PROTO);
5973
5974         if (!spec->tclass)
5975                 *unused_tuple |= BIT(INNER_IP_TOS);
5976
5977         if (spec->l4_4_bytes)
5978                 return -EOPNOTSUPP;
5979
5980         return 0;
5981 }
5982
5983 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5984 {
5985         if (!spec || !unused_tuple)
5986                 return -EINVAL;
5987
5988         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5989                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5990                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5991
5992         if (is_zero_ether_addr(spec->h_source))
5993                 *unused_tuple |= BIT(INNER_SRC_MAC);
5994
5995         if (is_zero_ether_addr(spec->h_dest))
5996                 *unused_tuple |= BIT(INNER_DST_MAC);
5997
5998         if (!spec->h_proto)
5999                 *unused_tuple |= BIT(INNER_ETH_TYPE);
6000
6001         return 0;
6002 }
6003
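     /* Validate the FLOW_EXT / FLOW_MAC_EXT part of the spec: vlan-etype
      * matching is rejected, vlan_tci is range checked, and destination
      * mac matching is only allowed in the 400-bit key mode.
      */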
6004 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6005                                     struct ethtool_rx_flow_spec *fs,
6006                                     u32 *unused_tuple)
6007 {
6008         if (fs->flow_type & FLOW_EXT) {
6009                 if (fs->h_ext.vlan_etype) {
6010                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6011                         return -EOPNOTSUPP;
6012                 }
6013
6014                 if (!fs->h_ext.vlan_tci)
6015                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6016
6017                 if (fs->m_ext.vlan_tci &&
6018                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6019                         dev_err(&hdev->pdev->dev,
6020                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6021                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6022                         return -EINVAL;
6023                 }
6024         } else {
6025                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6026         }
6027
6028         if (fs->flow_type & FLOW_MAC_EXT) {
6029                 if (hdev->fd_cfg.fd_mode !=
6030                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6031                         dev_err(&hdev->pdev->dev,
6032                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6033                         return -EOPNOTSUPP;
6034                 }
6035
6036                 if (is_zero_ether_addr(fs->h_ext.h_dest))
6037                         *unused_tuple |= BIT(INNER_DST_MAC);
6038                 else
6039                         *unused_tuple &= ~BIT(INNER_DST_MAC);
6040         }
6041
6042         return 0;
6043 }
6044
6045 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6046                                        struct hclge_fd_user_def_info *info)
6047 {
6048         switch (flow_type) {
6049         case ETHER_FLOW:
6050                 info->layer = HCLGE_FD_USER_DEF_L2;
6051                 *unused_tuple &= ~BIT(INNER_L2_RSV);
6052                 break;
6053         case IP_USER_FLOW:
6054         case IPV6_USER_FLOW:
6055                 info->layer = HCLGE_FD_USER_DEF_L3;
6056                 *unused_tuple &= ~BIT(INNER_L3_RSV);
6057                 break;
6058         case TCP_V4_FLOW:
6059         case UDP_V4_FLOW:
6060         case TCP_V6_FLOW:
6061         case UDP_V6_FLOW:
6062                 info->layer = HCLGE_FD_USER_DEF_L4;
6063                 *unused_tuple &= ~BIT(INNER_L4_RSV);
6064                 break;
6065         default:
6066                 return -EOPNOTSUPP;
6067         }
6068
6069         return 0;
6070 }
6071
6072 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6073 {
6074         return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6075 }
6076
6077 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6078                                          struct ethtool_rx_flow_spec *fs,
6079                                          u32 *unused_tuple,
6080                                          struct hclge_fd_user_def_info *info)
6081 {
6082         u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6083         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6084         u16 data, offset, data_mask, offset_mask;
6085         int ret;
6086
6087         info->layer = HCLGE_FD_USER_DEF_NONE;
6088         *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6089
6090         if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6091                 return 0;
6092
6093         /* user-def data from ethtool is a 64 bit value, where bit 0~15 is
6094          * used for data, and bit 32~47 is used for offset.
6095          */
6096         data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6097         data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6098         offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6099         offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6100
6101         if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6102                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6103                 return -EOPNOTSUPP;
6104         }
6105
6106         if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6107                 dev_err(&hdev->pdev->dev,
6108                         "user-def offset[%u] should be no more than %u\n",
6109                         offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6110                 return -EINVAL;
6111         }
6112
6113         if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6114                 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6115                 return -EINVAL;
6116         }
6117
6118         ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6119         if (ret) {
6120                 dev_err(&hdev->pdev->dev,
6121                         "unsupported flow type for user-def bytes, ret = %d\n",
6122                         ret);
6123                 return ret;
6124         }
6125
6126         info->data = data;
6127         info->data_mask = data_mask;
6128         info->offset = offset;
6129
6130         return 0;
6131 }
6132
6133 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6134                                struct ethtool_rx_flow_spec *fs,
6135                                u32 *unused_tuple,
6136                                struct hclge_fd_user_def_info *info)
6137 {
6138         u32 flow_type;
6139         int ret;
6140
6141         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6142                 dev_err(&hdev->pdev->dev,
6143                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
6144                         fs->location,
6145                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6146                 return -EINVAL;
6147         }
6148
6149         ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6150         if (ret)
6151                 return ret;
6152
6153         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6154         switch (flow_type) {
6155         case SCTP_V4_FLOW:
6156         case TCP_V4_FLOW:
6157         case UDP_V4_FLOW:
6158                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6159                                                   unused_tuple);
6160                 break;
6161         case IP_USER_FLOW:
6162                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6163                                                unused_tuple);
6164                 break;
6165         case SCTP_V6_FLOW:
6166         case TCP_V6_FLOW:
6167         case UDP_V6_FLOW:
6168                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6169                                                   unused_tuple);
6170                 break;
6171         case IPV6_USER_FLOW:
6172                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6173                                                unused_tuple);
6174                 break;
6175         case ETHER_FLOW:
6176                 if (hdev->fd_cfg.fd_mode !=
6177                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6178                         dev_err(&hdev->pdev->dev,
6179                                 "ETHER_FLOW is not supported in current fd mode!\n");
6180                         return -EOPNOTSUPP;
6181                 }
6182
6183                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6184                                                  unused_tuple);
6185                 break;
6186         default:
6187                 dev_err(&hdev->pdev->dev,
6188                         "unsupported protocol type, protocol type = %#x\n",
6189                         flow_type);
6190                 return -EOPNOTSUPP;
6191         }
6192
6193         if (ret) {
6194                 dev_err(&hdev->pdev->dev,
6195                         "failed to check flow union tuple, ret = %d\n",
6196                         ret);
6197                 return ret;
6198         }
6199
6200         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6201 }
6202
6203 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6204                                       struct ethtool_rx_flow_spec *fs,
6205                                       struct hclge_fd_rule *rule, u8 ip_proto)
6206 {
6207         rule->tuples.src_ip[IPV4_INDEX] =
6208                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6209         rule->tuples_mask.src_ip[IPV4_INDEX] =
6210                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6211
6212         rule->tuples.dst_ip[IPV4_INDEX] =
6213                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6214         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6215                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6216
6217         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6218         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6219
6220         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6221         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6222
6223         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6224         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6225
6226         rule->tuples.ether_proto = ETH_P_IP;
6227         rule->tuples_mask.ether_proto = 0xFFFF;
6228
6229         rule->tuples.ip_proto = ip_proto;
6230         rule->tuples_mask.ip_proto = 0xFF;
6231 }
6232
6233 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6234                                    struct ethtool_rx_flow_spec *fs,
6235                                    struct hclge_fd_rule *rule)
6236 {
6237         rule->tuples.src_ip[IPV4_INDEX] =
6238                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6239         rule->tuples_mask.src_ip[IPV4_INDEX] =
6240                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6241
6242         rule->tuples.dst_ip[IPV4_INDEX] =
6243                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6244         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6245                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6246
6247         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6248         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6249
6250         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6251         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6252
6253         rule->tuples.ether_proto = ETH_P_IP;
6254         rule->tuples_mask.ether_proto = 0xFFFF;
6255 }
6256
6257 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6258                                       struct ethtool_rx_flow_spec *fs,
6259                                       struct hclge_fd_rule *rule, u8 ip_proto)
6260 {
6261         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6262                           IPV6_SIZE);
6263         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6264                           IPV6_SIZE);
6265
6266         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6267                           IPV6_SIZE);
6268         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6269                           IPV6_SIZE);
6270
6271         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6272         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6273
6274         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6275         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6276
6277         rule->tuples.ether_proto = ETH_P_IPV6;
6278         rule->tuples_mask.ether_proto = 0xFFFF;
6279
6280         rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6281         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6282
6283         rule->tuples.ip_proto = ip_proto;
6284         rule->tuples_mask.ip_proto = 0xFF;
6285 }
6286
6287 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6288                                    struct ethtool_rx_flow_spec *fs,
6289                                    struct hclge_fd_rule *rule)
6290 {
6291         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6292                           IPV6_SIZE);
6293         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6294                           IPV6_SIZE);
6295
6296         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6297                           IPV6_SIZE);
6298         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6299                           IPV6_SIZE);
6300
6301         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6302         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6303
6304         rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6305         rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6306
6307         rule->tuples.ether_proto = ETH_P_IPV6;
6308         rule->tuples_mask.ether_proto = 0xFFFF;
6309 }
6310
6311 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6312                                      struct ethtool_rx_flow_spec *fs,
6313                                      struct hclge_fd_rule *rule)
6314 {
6315         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6316         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6317
6318         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6319         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6320
6321         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6322         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6323 }
6324
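     /* Copy the parsed user-def bytes into the tuple of the matching
      * layer; for the L4 layer the 16-bit data occupies the upper half
      * of the 32-bit user-def tuple.
      */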
6325 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6326                                         struct hclge_fd_rule *rule)
6327 {
6328         switch (info->layer) {
6329         case HCLGE_FD_USER_DEF_L2:
6330                 rule->tuples.l2_user_def = info->data;
6331                 rule->tuples_mask.l2_user_def = info->data_mask;
6332                 break;
6333         case HCLGE_FD_USER_DEF_L3:
6334                 rule->tuples.l3_user_def = info->data;
6335                 rule->tuples_mask.l3_user_def = info->data_mask;
6336                 break;
6337         case HCLGE_FD_USER_DEF_L4:
6338                 rule->tuples.l4_user_def = (u32)info->data << 16;
6339                 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6340                 break;
6341         default:
6342                 break;
6343         }
6344
6345         rule->ep.user_def = *info;
6346 }
6347
6348 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6349                               struct ethtool_rx_flow_spec *fs,
6350                               struct hclge_fd_rule *rule,
6351                               struct hclge_fd_user_def_info *info)
6352 {
6353         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6354
6355         switch (flow_type) {
6356         case SCTP_V4_FLOW:
6357                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6358                 break;
6359         case TCP_V4_FLOW:
6360                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6361                 break;
6362         case UDP_V4_FLOW:
6363                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6364                 break;
6365         case IP_USER_FLOW:
6366                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6367                 break;
6368         case SCTP_V6_FLOW:
6369                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6370                 break;
6371         case TCP_V6_FLOW:
6372                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6373                 break;
6374         case UDP_V6_FLOW:
6375                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6376                 break;
6377         case IPV6_USER_FLOW:
6378                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6379                 break;
6380         case ETHER_FLOW:
6381                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6382                 break;
6383         default:
6384                 return -EOPNOTSUPP;
6385         }
6386
6387         if (fs->flow_type & FLOW_EXT) {
6388                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6389                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6390                 hclge_fd_get_user_def_tuple(info, rule);
6391         }
6392
6393         if (fs->flow_type & FLOW_MAC_EXT) {
6394                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6395                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6396         }
6397
6398         return 0;
6399 }
6400
6401 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6402                                 struct hclge_fd_rule *rule)
6403 {
6404         int ret;
6405
6406         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6407         if (ret)
6408                 return ret;
6409
6410         return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6411 }
6412
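     /* Common path for installing an ethtool or tc flower rule: reject a
      * rule type that conflicts with the currently active type, check the
      * user-def field reference count, clear aRFS rules, program the
      * action and key, then record the rule as active in the rule list.
      */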
6413 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6414                                      struct hclge_fd_rule *rule)
6415 {
6416         int ret;
6417
6418         spin_lock_bh(&hdev->fd_rule_lock);
6419
6420         if (hdev->fd_active_type != rule->rule_type &&
6421             (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6422              hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6423                 dev_err(&hdev->pdev->dev,
6424                         "mode conflict (new type %d, active type %d), please delete existing rules first\n",
6425                         rule->rule_type, hdev->fd_active_type);
6426                 spin_unlock_bh(&hdev->fd_rule_lock);
6427                 return -EINVAL;
6428         }
6429
6430         ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6431         if (ret)
6432                 goto out;
6433
6434         ret = hclge_clear_arfs_rules(hdev);
6435         if (ret)
6436                 goto out;
6437
6438         ret = hclge_fd_config_rule(hdev, rule);
6439         if (ret)
6440                 goto out;
6441
6442         rule->state = HCLGE_FD_ACTIVE;
6443         hdev->fd_active_type = rule->rule_type;
6444         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6445
6446 out:
6447         spin_unlock_bh(&hdev->fd_rule_lock);
6448         return ret;
6449 }
6450
6451 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6452 {
6453         struct hclge_vport *vport = hclge_get_vport(handle);
6454         struct hclge_dev *hdev = vport->back;
6455
6456         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6457 }
6458
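     /* Decode the ethtool ring_cookie: either mark the rule as a drop
      * rule, or resolve the destination vport and check the queue index
      * against the number of tqps assigned to that vport.
      */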
6459 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6460                                       u16 *vport_id, u8 *action, u16 *queue_id)
6461 {
6462         struct hclge_vport *vport = hdev->vport;
6463
6464         if (ring_cookie == RX_CLS_FLOW_DISC) {
6465                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6466         } else {
6467                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6468                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6469                 u16 tqps;
6470
6471                 if (vf > hdev->num_req_vfs) {
6472                         dev_err(&hdev->pdev->dev,
6473                                 "Error: vf id (%u) > max vf num (%u)\n",
6474                                 vf, hdev->num_req_vfs);
6475                         return -EINVAL;
6476                 }
6477
6478                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6479                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6480
6481                 if (ring >= tqps) {
6482                         dev_err(&hdev->pdev->dev,
6483                                 "Error: queue id (%u) > max tqp num (%u)\n",
6484                                 ring, tqps - 1);
6485                         return -EINVAL;
6486                 }
6487
6488                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6489                 *queue_id = ring;
6490         }
6491
6492         return 0;
6493 }
6494
6495 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6496                               struct ethtool_rxnfc *cmd)
6497 {
6498         struct hclge_vport *vport = hclge_get_vport(handle);
6499         struct hclge_dev *hdev = vport->back;
6500         struct hclge_fd_user_def_info info;
6501         u16 dst_vport_id = 0, q_index = 0;
6502         struct ethtool_rx_flow_spec *fs;
6503         struct hclge_fd_rule *rule;
6504         u32 unused = 0;
6505         u8 action;
6506         int ret;
6507
6508         if (!hnae3_dev_fd_supported(hdev)) {
6509                 dev_err(&hdev->pdev->dev,
6510                         "flow director is not supported\n");
6511                 return -EOPNOTSUPP;
6512         }
6513
6514         if (!hdev->fd_en) {
6515                 dev_err(&hdev->pdev->dev,
6516                         "please enable flow director first\n");
6517                 return -EOPNOTSUPP;
6518         }
6519
6520         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6521
6522         ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6523         if (ret)
6524                 return ret;
6525
6526         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6527                                          &action, &q_index);
6528         if (ret)
6529                 return ret;
6530
6531         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6532         if (!rule)
6533                 return -ENOMEM;
6534
6535         ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6536         if (ret) {
6537                 kfree(rule);
6538                 return ret;
6539         }
6540
6541         rule->flow_type = fs->flow_type;
6542         rule->location = fs->location;
6543         rule->unused_tuple = unused;
6544         rule->vf_id = dst_vport_id;
6545         rule->queue_id = q_index;
6546         rule->action = action;
6547         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6548
6549         ret = hclge_add_fd_entry_common(hdev, rule);
6550         if (ret)
6551                 kfree(rule);
6552
6553         return ret;
6554 }
6555
6556 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6557                               struct ethtool_rxnfc *cmd)
6558 {
6559         struct hclge_vport *vport = hclge_get_vport(handle);
6560         struct hclge_dev *hdev = vport->back;
6561         struct ethtool_rx_flow_spec *fs;
6562         int ret;
6563
6564         if (!hnae3_dev_fd_supported(hdev))
6565                 return -EOPNOTSUPP;
6566
6567         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6568
6569         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6570                 return -EINVAL;
6571
6572         spin_lock_bh(&hdev->fd_rule_lock);
6573         if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6574             !test_bit(fs->location, hdev->fd_bmap)) {
6575                 dev_err(&hdev->pdev->dev,
6576                         "Delete fail, rule %u does not exist\n", fs->location);
6577                 spin_unlock_bh(&hdev->fd_rule_lock);
6578                 return -ENOENT;
6579         }
6580
6581         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6582                                    NULL, false);
6583         if (ret)
6584                 goto out;
6585
6586         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6587
6588 out:
6589         spin_unlock_bh(&hdev->fd_rule_lock);
6590         return ret;
6591 }
6592
6593 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6594                                          bool clear_list)
6595 {
6596         struct hclge_fd_rule *rule;
6597         struct hlist_node *node;
6598         u16 location;
6599
6600         if (!hnae3_dev_fd_supported(hdev))
6601                 return;
6602
6603         spin_lock_bh(&hdev->fd_rule_lock);
6604
6605         for_each_set_bit(location, hdev->fd_bmap,
6606                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6607                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6608                                      NULL, false);
6609
6610         if (clear_list) {
6611                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6612                                           rule_node) {
6613                         hlist_del(&rule->rule_node);
6614                         kfree(rule);
6615                 }
6616                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6617                 hdev->hclge_fd_rule_num = 0;
6618                 bitmap_zero(hdev->fd_bmap,
6619                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6620         }
6621
6622         spin_unlock_bh(&hdev->fd_rule_lock);
6623 }
6624
6625 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6626 {
6627         hclge_clear_fd_rules_in_list(hdev, true);
6628         hclge_fd_disable_user_def(hdev);
6629 }
6630
6631 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6632 {
6633         struct hclge_vport *vport = hclge_get_vport(handle);
6634         struct hclge_dev *hdev = vport->back;
6635         struct hclge_fd_rule *rule;
6636         struct hlist_node *node;
6637
6638         /* Return ok here, because the reset error handling will check this
6639          * return value. If an error is returned here, the reset process
6640          * will fail.
6641          */
6642         if (!hnae3_dev_fd_supported(hdev))
6643                 return 0;
6644
6645         /* if fd is disabled, should not restore it when reset */
6646         if (!hdev->fd_en)
6647                 return 0;
6648
6649         spin_lock_bh(&hdev->fd_rule_lock);
6650         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6651                 if (rule->state == HCLGE_FD_ACTIVE)
6652                         rule->state = HCLGE_FD_TO_ADD;
6653         }
6654         spin_unlock_bh(&hdev->fd_rule_lock);
6655         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6656
6657         return 0;
6658 }
6659
6660 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6661                                  struct ethtool_rxnfc *cmd)
6662 {
6663         struct hclge_vport *vport = hclge_get_vport(handle);
6664         struct hclge_dev *hdev = vport->back;
6665
6666         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6667                 return -EOPNOTSUPP;
6668
6669         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6670         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6671
6672         return 0;
6673 }
6674
6675 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6676                                      struct ethtool_tcpip4_spec *spec,
6677                                      struct ethtool_tcpip4_spec *spec_mask)
6678 {
6679         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6680         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6681                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6682
6683         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6684         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6685                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6686
6687         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6688         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6689                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6690
6691         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6692         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6693                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6694
6695         spec->tos = rule->tuples.ip_tos;
6696         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6697                         0 : rule->tuples_mask.ip_tos;
6698 }
6699
6700 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6701                                   struct ethtool_usrip4_spec *spec,
6702                                   struct ethtool_usrip4_spec *spec_mask)
6703 {
6704         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6705         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6706                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6707
6708         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6709         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6710                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6711
6712         spec->tos = rule->tuples.ip_tos;
6713         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6714                         0 : rule->tuples_mask.ip_tos;
6715
6716         spec->proto = rule->tuples.ip_proto;
6717         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6718                         0 : rule->tuples_mask.ip_proto;
6719
6720         spec->ip_ver = ETH_RX_NFC_IP4;
6721 }
6722
6723 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6724                                      struct ethtool_tcpip6_spec *spec,
6725                                      struct ethtool_tcpip6_spec *spec_mask)
6726 {
6727         cpu_to_be32_array(spec->ip6src,
6728                           rule->tuples.src_ip, IPV6_SIZE);
6729         cpu_to_be32_array(spec->ip6dst,
6730                           rule->tuples.dst_ip, IPV6_SIZE);
6731         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6732                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6733         else
6734                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6735                                   IPV6_SIZE);
6736
6737         if (rule->unused_tuple & BIT(INNER_DST_IP))
6738                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6739         else
6740                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6741                                   IPV6_SIZE);
6742
6743         spec->tclass = rule->tuples.ip_tos;
6744         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6745                         0 : rule->tuples_mask.ip_tos;
6746
6747         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6748         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6749                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6750
6751         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6752         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6753                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6754 }
6755
6756 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6757                                   struct ethtool_usrip6_spec *spec,
6758                                   struct ethtool_usrip6_spec *spec_mask)
6759 {
6760         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6761         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6762         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6763                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6764         else
6765                 cpu_to_be32_array(spec_mask->ip6src,
6766                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6767
6768         if (rule->unused_tuple & BIT(INNER_DST_IP))
6769                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6770         else
6771                 cpu_to_be32_array(spec_mask->ip6dst,
6772                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6773
6774         spec->tclass = rule->tuples.ip_tos;
6775         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6776                         0 : rule->tuples_mask.ip_tos;
6777
6778         spec->l4_proto = rule->tuples.ip_proto;
6779         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6780                         0 : rule->tuples_mask.ip_proto;
6781 }
6782
6783 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6784                                     struct ethhdr *spec,
6785                                     struct ethhdr *spec_mask)
6786 {
6787         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6788         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6789
6790         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6791                 eth_zero_addr(spec_mask->h_source);
6792         else
6793                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6794
6795         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6796                 eth_zero_addr(spec_mask->h_dest);
6797         else
6798                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6799
6800         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6801         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6802                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6803 }
6804
6805 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6806                                        struct hclge_fd_rule *rule)
6807 {
6808         if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6809             HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6810                 fs->h_ext.data[0] = 0;
6811                 fs->h_ext.data[1] = 0;
6812                 fs->m_ext.data[0] = 0;
6813                 fs->m_ext.data[1] = 0;
6814         } else {
6815                 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6816                 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6817                 fs->m_ext.data[0] =
6818                                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6819                 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6820         }
6821 }
6822
6823 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6824                                   struct hclge_fd_rule *rule)
6825 {
6826         if (fs->flow_type & FLOW_EXT) {
6827                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6828                 fs->m_ext.vlan_tci =
6829                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6830                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6831
6832                 hclge_fd_get_user_def_info(fs, rule);
6833         }
6834
6835         if (fs->flow_type & FLOW_MAC_EXT) {
6836                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6837                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6838                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6839                 else
6840                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6841                                         rule->tuples_mask.dst_mac);
6842         }
6843 }
6844
6845 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6846                                   struct ethtool_rxnfc *cmd)
6847 {
6848         struct hclge_vport *vport = hclge_get_vport(handle);
6849         struct hclge_fd_rule *rule = NULL;
6850         struct hclge_dev *hdev = vport->back;
6851         struct ethtool_rx_flow_spec *fs;
6852         struct hlist_node *node2;
6853
6854         if (!hnae3_dev_fd_supported(hdev))
6855                 return -EOPNOTSUPP;
6856
6857         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6858
6859         spin_lock_bh(&hdev->fd_rule_lock);
6860
6861         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6862                 if (rule->location >= fs->location)
6863                         break;
6864         }
6865
6866         if (!rule || fs->location != rule->location) {
6867                 spin_unlock_bh(&hdev->fd_rule_lock);
6868
6869                 return -ENOENT;
6870         }
6871
6872         fs->flow_type = rule->flow_type;
6873         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6874         case SCTP_V4_FLOW:
6875         case TCP_V4_FLOW:
6876         case UDP_V4_FLOW:
6877                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6878                                          &fs->m_u.tcp_ip4_spec);
6879                 break;
6880         case IP_USER_FLOW:
6881                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6882                                       &fs->m_u.usr_ip4_spec);
6883                 break;
6884         case SCTP_V6_FLOW:
6885         case TCP_V6_FLOW:
6886         case UDP_V6_FLOW:
6887                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6888                                          &fs->m_u.tcp_ip6_spec);
6889                 break;
6890         case IPV6_USER_FLOW:
6891                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6892                                       &fs->m_u.usr_ip6_spec);
6893                 break;
6894         /* The flow type of fd rule has been checked before adding into rule
6895          * list. As other flow types have been handled, it must be ETHER_FLOW
6896          * for the default case
6897          */
6898         default:
6899                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6900                                         &fs->m_u.ether_spec);
6901                 break;
6902         }
6903
6904         hclge_fd_get_ext_info(fs, rule);
6905
6906         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6907                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6908         } else {
6909                 u64 vf_id;
6910
6911                 fs->ring_cookie = rule->queue_id;
6912                 vf_id = rule->vf_id;
6913                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6914                 fs->ring_cookie |= vf_id;
6915         }
6916
6917         spin_unlock_bh(&hdev->fd_rule_lock);
6918
6919         return 0;
6920 }
6921
6922 static int hclge_get_all_rules(struct hnae3_handle *handle,
6923                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6924 {
6925         struct hclge_vport *vport = hclge_get_vport(handle);
6926         struct hclge_dev *hdev = vport->back;
6927         struct hclge_fd_rule *rule;
6928         struct hlist_node *node2;
6929         int cnt = 0;
6930
6931         if (!hnae3_dev_fd_supported(hdev))
6932                 return -EOPNOTSUPP;
6933
6934         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6935
6936         spin_lock_bh(&hdev->fd_rule_lock);
6937         hlist_for_each_entry_safe(rule, node2,
6938                                   &hdev->fd_rule_list, rule_node) {
6939                 if (cnt == cmd->rule_cnt) {
6940                         spin_unlock_bh(&hdev->fd_rule_lock);
6941                         return -EMSGSIZE;
6942                 }
6943
6944                 if (rule->state == HCLGE_FD_TO_DEL)
6945                         continue;
6946
6947                 rule_locs[cnt] = rule->location;
6948                 cnt++;
6949         }
6950
6951         spin_unlock_bh(&hdev->fd_rule_lock);
6952
6953         cmd->rule_cnt = cnt;
6954
6955         return 0;
6956 }
6957
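/* Extract the tuple fields used by aRFS rules (ether proto, ip proto,
 * destination port and IPv4/IPv6 addresses) from the dissected flow keys,
 * converting them to host byte order.
 */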
6958 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6959                                      struct hclge_fd_rule_tuples *tuples)
6960 {
6961 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6962 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6963
6964         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6965         tuples->ip_proto = fkeys->basic.ip_proto;
6966         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6967
6968         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6969                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6970                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6971         } else {
6972                 int i;
6973
6974                 for (i = 0; i < IPV6_SIZE; i++) {
6975                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6976                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6977                 }
6978         }
6979 }
6980
6981 /* traverse all rules, check whether an existing rule has the same tuples */
6982 static struct hclge_fd_rule *
6983 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6984                           const struct hclge_fd_rule_tuples *tuples)
6985 {
6986         struct hclge_fd_rule *rule = NULL;
6987         struct hlist_node *node;
6988
6989         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6990                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6991                         return rule;
6992         }
6993
6994         return NULL;
6995 }
6996
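/* Initialize an aRFS rule from the extracted tuples: mark the tuples aRFS
 * does not match on as unused, derive the TCP/UDP v4/v6 flow type, and use
 * a full mask for the copied tuples.
 */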
6997 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6998                                      struct hclge_fd_rule *rule)
6999 {
7000         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7001                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7002                              BIT(INNER_SRC_PORT);
7003         rule->action = 0;
7004         rule->vf_id = 0;
7005         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7006         rule->state = HCLGE_FD_TO_ADD;
7007         if (tuples->ether_proto == ETH_P_IP) {
7008                 if (tuples->ip_proto == IPPROTO_TCP)
7009                         rule->flow_type = TCP_V4_FLOW;
7010                 else
7011                         rule->flow_type = UDP_V4_FLOW;
7012         } else {
7013                 if (tuples->ip_proto == IPPROTO_TCP)
7014                         rule->flow_type = TCP_V6_FLOW;
7015                 else
7016                         rule->flow_type = UDP_V6_FLOW;
7017         }
7018         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7019         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7020 }
7021
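/* aRFS flow steering: refuse to run while user-configured fd rules are
 * active, then create a new rule for the flow or retarget an existing one
 * to the new queue. The rule location is returned and is later used as the
 * filter id when checking for expiry.
 */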
7022 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7023                                       u16 flow_id, struct flow_keys *fkeys)
7024 {
7025         struct hclge_vport *vport = hclge_get_vport(handle);
7026         struct hclge_fd_rule_tuples new_tuples = {};
7027         struct hclge_dev *hdev = vport->back;
7028         struct hclge_fd_rule *rule;
7029         u16 bit_id;
7030
7031         if (!hnae3_dev_fd_supported(hdev))
7032                 return -EOPNOTSUPP;
7033
7034         /* when there is already an fd rule added by the user,
7035          * arfs should not work
7036          */
7037         spin_lock_bh(&hdev->fd_rule_lock);
7038         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7039             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7040                 spin_unlock_bh(&hdev->fd_rule_lock);
7041                 return -EOPNOTSUPP;
7042         }
7043
7044         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7045
7046         /* check whether a flow director filter exists for this flow;
7047          * if not, create a new filter for it;
7048          * if a filter exists with a different queue id, modify the filter;
7049          * if a filter exists with the same queue id, do nothing
7050          */
7051         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7052         if (!rule) {
7053                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7054                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7055                         spin_unlock_bh(&hdev->fd_rule_lock);
7056                         return -ENOSPC;
7057                 }
7058
7059                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7060                 if (!rule) {
7061                         spin_unlock_bh(&hdev->fd_rule_lock);
7062                         return -ENOMEM;
7063                 }
7064
7065                 rule->location = bit_id;
7066                 rule->arfs.flow_id = flow_id;
7067                 rule->queue_id = queue_id;
7068                 hclge_fd_build_arfs_rule(&new_tuples, rule);
7069                 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7070                 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7071         } else if (rule->queue_id != queue_id) {
7072                 rule->queue_id = queue_id;
7073                 rule->state = HCLGE_FD_TO_ADD;
7074                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7075                 hclge_task_schedule(hdev, 0);
7076         }
7077         spin_unlock_bh(&hdev->fd_rule_lock);
7078         return rule->location;
7079 }
7080
7081 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7082 {
7083 #ifdef CONFIG_RFS_ACCEL
7084         struct hnae3_handle *handle = &hdev->vport[0].nic;
7085         struct hclge_fd_rule *rule;
7086         struct hlist_node *node;
7087
7088         spin_lock_bh(&hdev->fd_rule_lock);
7089         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7090                 spin_unlock_bh(&hdev->fd_rule_lock);
7091                 return;
7092         }
7093         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7094                 if (rule->state != HCLGE_FD_ACTIVE)
7095                         continue;
7096                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7097                                         rule->arfs.flow_id, rule->location)) {
7098                         rule->state = HCLGE_FD_TO_DEL;
7099                         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7100                 }
7101         }
7102         spin_unlock_bh(&hdev->fd_rule_lock);
7103 #endif
7104 }
7105
7106 /* must be called with fd_rule_lock held */
7107 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7108 {
7109 #ifdef CONFIG_RFS_ACCEL
7110         struct hclge_fd_rule *rule;
7111         struct hlist_node *node;
7112         int ret;
7113
7114         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7115                 return 0;
7116
7117         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7118                 switch (rule->state) {
7119                 case HCLGE_FD_TO_DEL:
7120                 case HCLGE_FD_ACTIVE:
7121                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7122                                                    rule->location, NULL, false);
7123                         if (ret)
7124                                 return ret;
7125                         fallthrough;
7126                 case HCLGE_FD_TO_ADD:
7127                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7128                         hlist_del(&rule->rule_node);
7129                         kfree(rule);
7130                         break;
7131                 default:
7132                         break;
7133                 }
7134         }
7135         hclge_sync_fd_state(hdev);
7136
7137 #endif
7138         return 0;
7139 }
7140
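/* The hclge_get_cls_key_* helpers below copy the matched keys of a tc
 * flower rule into the fd rule tuples; any key absent from the match is
 * flagged in unused_tuple instead.
 */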
7141 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7142                                     struct hclge_fd_rule *rule)
7143 {
7144         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7145                 struct flow_match_basic match;
7146                 u16 ethtype_key, ethtype_mask;
7147
7148                 flow_rule_match_basic(flow, &match);
7149                 ethtype_key = ntohs(match.key->n_proto);
7150                 ethtype_mask = ntohs(match.mask->n_proto);
7151
7152                 if (ethtype_key == ETH_P_ALL) {
7153                         ethtype_key = 0;
7154                         ethtype_mask = 0;
7155                 }
7156                 rule->tuples.ether_proto = ethtype_key;
7157                 rule->tuples_mask.ether_proto = ethtype_mask;
7158                 rule->tuples.ip_proto = match.key->ip_proto;
7159                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7160         } else {
7161                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7162                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7163         }
7164 }
7165
7166 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7167                                   struct hclge_fd_rule *rule)
7168 {
7169         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7170                 struct flow_match_eth_addrs match;
7171
7172                 flow_rule_match_eth_addrs(flow, &match);
7173                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7174                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7175                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7176                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7177         } else {
7178                 rule->unused_tuple |= BIT(INNER_DST_MAC);
7179                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7180         }
7181 }
7182
7183 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7184                                    struct hclge_fd_rule *rule)
7185 {
7186         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7187                 struct flow_match_vlan match;
7188
7189                 flow_rule_match_vlan(flow, &match);
7190                 rule->tuples.vlan_tag1 = match.key->vlan_id |
7191                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7192                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7193                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7194         } else {
7195                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7196         }
7197 }
7198
7199 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7200                                  struct hclge_fd_rule *rule)
7201 {
7202         u16 addr_type = 0;
7203
7204         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7205                 struct flow_match_control match;
7206
7207                 flow_rule_match_control(flow, &match);
7208                 addr_type = match.key->addr_type;
7209         }
7210
7211         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7212                 struct flow_match_ipv4_addrs match;
7213
7214                 flow_rule_match_ipv4_addrs(flow, &match);
7215                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7216                 rule->tuples_mask.src_ip[IPV4_INDEX] =
7217                                                 be32_to_cpu(match.mask->src);
7218                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7219                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7220                                                 be32_to_cpu(match.mask->dst);
7221         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7222                 struct flow_match_ipv6_addrs match;
7223
7224                 flow_rule_match_ipv6_addrs(flow, &match);
7225                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7226                                   IPV6_SIZE);
7227                 be32_to_cpu_array(rule->tuples_mask.src_ip,
7228                                   match.mask->src.s6_addr32, IPV6_SIZE);
7229                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7230                                   IPV6_SIZE);
7231                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7232                                   match.mask->dst.s6_addr32, IPV6_SIZE);
7233         } else {
7234                 rule->unused_tuple |= BIT(INNER_SRC_IP);
7235                 rule->unused_tuple |= BIT(INNER_DST_IP);
7236         }
7237 }
7238
7239 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7240                                    struct hclge_fd_rule *rule)
7241 {
7242         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7243                 struct flow_match_ports match;
7244
7245                 flow_rule_match_ports(flow, &match);
7246
7247                 rule->tuples.src_port = be16_to_cpu(match.key->src);
7248                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7249                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7250                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7251         } else {
7252                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7253                 rule->unused_tuple |= BIT(INNER_DST_PORT);
7254         }
7255 }
7256
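/* Reject flower rules matching on dissector keys the flow director cannot
 * handle, then collect the supported basic/MAC/VLAN/IP/port keys into the
 * rule tuples.
 */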
7257 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7258                                   struct flow_cls_offload *cls_flower,
7259                                   struct hclge_fd_rule *rule)
7260 {
7261         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7262         struct flow_dissector *dissector = flow->match.dissector;
7263
7264         if (dissector->used_keys &
7265             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7266               BIT(FLOW_DISSECTOR_KEY_BASIC) |
7267               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7268               BIT(FLOW_DISSECTOR_KEY_VLAN) |
7269               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7270               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7271               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7272                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7273                         dissector->used_keys);
7274                 return -EOPNOTSUPP;
7275         }
7276
7277         hclge_get_cls_key_basic(flow, rule);
7278         hclge_get_cls_key_mac(flow, rule);
7279         hclge_get_cls_key_vlan(flow, rule);
7280         hclge_get_cls_key_ip(flow, rule);
7281         hclge_get_cls_key_port(flow, rule);
7282
7283         return 0;
7284 }
7285
7286 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7287                                   struct flow_cls_offload *cls_flower, int tc)
7288 {
7289         u32 prio = cls_flower->common.prio;
7290
7291         if (tc < 0 || tc > hdev->tc_max) {
7292                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7293                 return -EINVAL;
7294         }
7295
7296         if (prio == 0 ||
7297             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7298                 dev_err(&hdev->pdev->dev,
7299                         "prio %u should be in range[1, %u]\n",
7300                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7301                 return -EINVAL;
7302         }
7303
7304         if (test_bit(prio - 1, hdev->fd_bmap)) {
7305                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7306                 return -EINVAL;
7307         }
7308         return 0;
7309 }
7310
7311 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7312                                 struct flow_cls_offload *cls_flower,
7313                                 int tc)
7314 {
7315         struct hclge_vport *vport = hclge_get_vport(handle);
7316         struct hclge_dev *hdev = vport->back;
7317         struct hclge_fd_rule *rule;
7318         int ret;
7319
7320         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7321         if (ret) {
7322                 dev_err(&hdev->pdev->dev,
7323                         "failed to check cls flower params, ret = %d\n", ret);
7324                 return ret;
7325         }
7326
7327         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7328         if (!rule)
7329                 return -ENOMEM;
7330
7331         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7332         if (ret) {
7333                 kfree(rule);
7334                 return ret;
7335         }
7336
7337         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7338         rule->cls_flower.tc = tc;
7339         rule->location = cls_flower->common.prio - 1;
7340         rule->vf_id = 0;
7341         rule->cls_flower.cookie = cls_flower->cookie;
7342         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7343
7344         ret = hclge_add_fd_entry_common(hdev, rule);
7345         if (ret)
7346                 kfree(rule);
7347
7348         return ret;
7349 }
7350
7351 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7352                                                    unsigned long cookie)
7353 {
7354         struct hclge_fd_rule *rule;
7355         struct hlist_node *node;
7356
7357         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7358                 if (rule->cls_flower.cookie == cookie)
7359                         return rule;
7360         }
7361
7362         return NULL;
7363 }
7364
7365 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7366                                 struct flow_cls_offload *cls_flower)
7367 {
7368         struct hclge_vport *vport = hclge_get_vport(handle);
7369         struct hclge_dev *hdev = vport->back;
7370         struct hclge_fd_rule *rule;
7371         int ret;
7372
7373         spin_lock_bh(&hdev->fd_rule_lock);
7374
7375         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7376         if (!rule) {
7377                 spin_unlock_bh(&hdev->fd_rule_lock);
7378                 return -EINVAL;
7379         }
7380
7381         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7382                                    NULL, false);
7383         if (ret) {
7384                 spin_unlock_bh(&hdev->fd_rule_lock);
7385                 return ret;
7386         }
7387
7388         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7389         spin_unlock_bh(&hdev->fd_rule_lock);
7390
7391         return 0;
7392 }
7393
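/* Push pending fd rule changes to hardware: program rules in TO_ADD state
 * and remove rules in TO_DEL state. On failure the FD_TBL_CHANGED flag is
 * set again so the work is retried later.
 */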
7394 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7395 {
7396         struct hclge_fd_rule *rule;
7397         struct hlist_node *node;
7398         int ret = 0;
7399
7400         if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7401                 return;
7402
7403         spin_lock_bh(&hdev->fd_rule_lock);
7404
7405         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7406                 switch (rule->state) {
7407                 case HCLGE_FD_TO_ADD:
7408                         ret = hclge_fd_config_rule(hdev, rule);
7409                         if (ret)
7410                                 goto out;
7411                         rule->state = HCLGE_FD_ACTIVE;
7412                         break;
7413                 case HCLGE_FD_TO_DEL:
7414                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7415                                                    rule->location, NULL, false);
7416                         if (ret)
7417                                 goto out;
7418                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7419                         hclge_fd_free_node(hdev, rule);
7420                         break;
7421                 default:
7422                         break;
7423                 }
7424         }
7425
7426 out:
7427         if (ret)
7428                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7429
7430         spin_unlock_bh(&hdev->fd_rule_lock);
7431 }
7432
7433 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7434 {
7435         if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7436                 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7437
7438                 hclge_clear_fd_rules_in_list(hdev, clear_list);
7439         }
7440
7441         hclge_sync_fd_user_def_cfg(hdev, false);
7442
7443         hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7444 }
7445
7446 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7447 {
7448         struct hclge_vport *vport = hclge_get_vport(handle);
7449         struct hclge_dev *hdev = vport->back;
7450
7451         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7452                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7453 }
7454
7455 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7456 {
7457         struct hclge_vport *vport = hclge_get_vport(handle);
7458         struct hclge_dev *hdev = vport->back;
7459
7460         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7461 }
7462
7463 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7464 {
7465         struct hclge_vport *vport = hclge_get_vport(handle);
7466         struct hclge_dev *hdev = vport->back;
7467
7468         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7469 }
7470
7471 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7472 {
7473         struct hclge_vport *vport = hclge_get_vport(handle);
7474         struct hclge_dev *hdev = vport->back;
7475
7476         return hdev->rst_stats.hw_reset_done_cnt;
7477 }
7478
7479 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7480 {
7481         struct hclge_vport *vport = hclge_get_vport(handle);
7482         struct hclge_dev *hdev = vport->back;
7483
7484         hdev->fd_en = enable;
7485
7486         if (!enable)
7487                 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7488         else
7489                 hclge_restore_fd_entries(handle);
7490
7491         hclge_task_schedule(hdev, 0);
7492 }
7493
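/* Enable or disable MAC TX/RX together with padding, FCS and
 * oversize/undersize handling via the CONFIG_MAC_MODE command.
 */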
7494 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7495 {
7496         struct hclge_desc desc;
7497         struct hclge_config_mac_mode_cmd *req =
7498                 (struct hclge_config_mac_mode_cmd *)desc.data;
7499         u32 loop_en = 0;
7500         int ret;
7501
7502         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7503
7504         if (enable) {
7505                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7506                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7507                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7508                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7509                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7510                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7511                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7512                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7513                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7514                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7515         }
7516
7517         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7518
7519         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7520         if (ret)
7521                 dev_err(&hdev->pdev->dev,
7522                         "mac enable fail, ret =%d.\n", ret);
7523 }
7524
7525 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7526                                      u8 switch_param, u8 param_mask)
7527 {
7528         struct hclge_mac_vlan_switch_cmd *req;
7529         struct hclge_desc desc;
7530         u32 func_id;
7531         int ret;
7532
7533         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7534         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7535
7536         /* read current config parameter */
7537         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7538                                    true);
7539         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7540         req->func_id = cpu_to_le32(func_id);
7541
7542         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7543         if (ret) {
7544                 dev_err(&hdev->pdev->dev,
7545                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7546                 return ret;
7547         }
7548
7549         /* modify and write new config parameter */
7550         hclge_cmd_reuse_desc(&desc, false);
7551         req->switch_param = (req->switch_param & param_mask) | switch_param;
7552         req->param_mask = param_mask;
7553
7554         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7555         if (ret)
7556                 dev_err(&hdev->pdev->dev,
7557                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7558         return ret;
7559 }
7560
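/* Poll the PHY link state until it reaches the expected up/down state or
 * the retry budget runs out; used together with the MAC link wait below
 * when loopback modes are toggled.
 */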
7561 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7562                                        int link_ret)
7563 {
7564 #define HCLGE_PHY_LINK_STATUS_NUM  200
7565
7566         struct phy_device *phydev = hdev->hw.mac.phydev;
7567         int i = 0;
7568         int ret;
7569
7570         do {
7571                 ret = phy_read_status(phydev);
7572                 if (ret) {
7573                         dev_err(&hdev->pdev->dev,
7574                                 "phy update link status fail, ret = %d\n", ret);
7575                         return;
7576                 }
7577
7578                 if (phydev->link == link_ret)
7579                         break;
7580
7581                 msleep(HCLGE_LINK_STATUS_MS);
7582         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7583 }
7584
7585 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7586 {
7587 #define HCLGE_MAC_LINK_STATUS_NUM  100
7588
7589         int link_status;
7590         int i = 0;
7591         int ret;
7592
7593         do {
7594                 ret = hclge_get_mac_link_status(hdev, &link_status);
7595                 if (ret)
7596                         return ret;
7597                 if (link_status == link_ret)
7598                         return 0;
7599
7600                 msleep(HCLGE_LINK_STATUS_MS);
7601         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7602         return -EBUSY;
7603 }
7604
7605 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7606                                           bool is_phy)
7607 {
7608         int link_ret;
7609
7610         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7611
7612         if (is_phy)
7613                 hclge_phy_link_status_wait(hdev, link_ret);
7614
7615         return hclge_mac_link_status_wait(hdev, link_ret);
7616 }
7617
7618 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7619 {
7620         struct hclge_config_mac_mode_cmd *req;
7621         struct hclge_desc desc;
7622         u32 loop_en;
7623         int ret;
7624
7625         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7626         /* 1 Read out the MAC mode config at first */
7627         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7628         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7629         if (ret) {
7630                 dev_err(&hdev->pdev->dev,
7631                         "mac loopback get fail, ret =%d.\n", ret);
7632                 return ret;
7633         }
7634
7635         /* 2 Then setup the loopback flag */
7636         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7637         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7638
7639         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7640
7641         /* 3 Config mac work mode with loopback flag
7642          * and its original configuration parameters
7643          */
7644         hclge_cmd_reuse_desc(&desc, false);
7645         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7646         if (ret)
7647                 dev_err(&hdev->pdev->dev,
7648                         "mac loopback set fail, ret =%d.\n", ret);
7649         return ret;
7650 }
7651
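/* Configure serdes (serial or parallel) or IMP-controlled PHY loopback
 * through the COMMON_LOOPBACK command, then poll the firmware until it
 * reports the request as done, with a bounded number of retries.
 */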
7652 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7653                                      enum hnae3_loop loop_mode)
7654 {
7655 #define HCLGE_COMMON_LB_RETRY_MS        10
7656 #define HCLGE_COMMON_LB_RETRY_NUM       100
7657
7658         struct hclge_common_lb_cmd *req;
7659         struct hclge_desc desc;
7660         int ret, i = 0;
7661         u8 loop_mode_b;
7662
7663         req = (struct hclge_common_lb_cmd *)desc.data;
7664         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7665
7666         switch (loop_mode) {
7667         case HNAE3_LOOP_SERIAL_SERDES:
7668                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7669                 break;
7670         case HNAE3_LOOP_PARALLEL_SERDES:
7671                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7672                 break;
7673         case HNAE3_LOOP_PHY:
7674                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7675                 break;
7676         default:
7677                 dev_err(&hdev->pdev->dev,
7678                         "unsupported common loopback mode %d\n", loop_mode);
7679                 return -ENOTSUPP;
7680         }
7681
7682         if (en) {
7683                 req->enable = loop_mode_b;
7684                 req->mask = loop_mode_b;
7685         } else {
7686                 req->mask = loop_mode_b;
7687         }
7688
7689         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7690         if (ret) {
7691                 dev_err(&hdev->pdev->dev,
7692                         "common loopback set fail, ret = %d\n", ret);
7693                 return ret;
7694         }
7695
7696         do {
7697                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7698                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7699                                            true);
7700                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7701                 if (ret) {
7702                         dev_err(&hdev->pdev->dev,
7703                                 "common loopback get, ret = %d\n", ret);
7704                         return ret;
7705                 }
7706         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7707                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7708
7709         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7710                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7711                 return -EBUSY;
7712         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7713                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7714                 return -EIO;
7715         }
7716         return ret;
7717 }
7718
7719 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7720                                      enum hnae3_loop loop_mode)
7721 {
7722         int ret;
7723
7724         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7725         if (ret)
7726                 return ret;
7727
7728         hclge_cfg_mac_mode(hdev, en);
7729
7730         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7731         if (ret)
7732                 dev_err(&hdev->pdev->dev,
7733                         "serdes loopback config mac mode timeout\n");
7734
7735         return ret;
7736 }
7737
7738 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7739                                      struct phy_device *phydev)
7740 {
7741         int ret;
7742
7743         if (!phydev->suspended) {
7744                 ret = phy_suspend(phydev);
7745                 if (ret)
7746                         return ret;
7747         }
7748
7749         ret = phy_resume(phydev);
7750         if (ret)
7751                 return ret;
7752
7753         return phy_loopback(phydev, true);
7754 }
7755
7756 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7757                                       struct phy_device *phydev)
7758 {
7759         int ret;
7760
7761         ret = phy_loopback(phydev, false);
7762         if (ret)
7763                 return ret;
7764
7765         return phy_suspend(phydev);
7766 }
7767
7768 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7769 {
7770         struct phy_device *phydev = hdev->hw.mac.phydev;
7771         int ret;
7772
7773         if (!phydev) {
7774                 if (hnae3_dev_phy_imp_supported(hdev))
7775                         return hclge_set_common_loopback(hdev, en,
7776                                                          HNAE3_LOOP_PHY);
7777                 return -ENOTSUPP;
7778         }
7779
7780         if (en)
7781                 ret = hclge_enable_phy_loopback(hdev, phydev);
7782         else
7783                 ret = hclge_disable_phy_loopback(hdev, phydev);
7784         if (ret) {
7785                 dev_err(&hdev->pdev->dev,
7786                         "set phy loopback fail, ret = %d\n", ret);
7787                 return ret;
7788         }
7789
7790         hclge_cfg_mac_mode(hdev, en);
7791
7792         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7793         if (ret)
7794                 dev_err(&hdev->pdev->dev,
7795                         "phy loopback config mac mode timeout\n");
7796
7797         return ret;
7798 }
7799
7800 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7801                                      u16 stream_id, bool enable)
7802 {
7803         struct hclge_desc desc;
7804         struct hclge_cfg_com_tqp_queue_cmd *req =
7805                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7806
7807         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7808         req->tqp_id = cpu_to_le16(tqp_id);
7809         req->stream_id = cpu_to_le16(stream_id);
7810         if (enable)
7811                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7812
7813         return hclge_cmd_send(&hdev->hw, &desc, 1);
7814 }
7815
7816 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7817 {
7818         struct hclge_vport *vport = hclge_get_vport(handle);
7819         struct hclge_dev *hdev = vport->back;
7820         int ret;
7821         u16 i;
7822
7823         for (i = 0; i < handle->kinfo.num_tqps; i++) {
7824                 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7825                 if (ret)
7826                         return ret;
7827         }
7828         return 0;
7829 }
7830
7831 static int hclge_set_loopback(struct hnae3_handle *handle,
7832                               enum hnae3_loop loop_mode, bool en)
7833 {
7834         struct hclge_vport *vport = hclge_get_vport(handle);
7835         struct hclge_dev *hdev = vport->back;
7836         int ret;
7837
7838         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7839          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7840          * the same, the packets are looped back in the SSU. If SSU loopback
7841          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7842          */
7843         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7844                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7845
7846                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7847                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7848                 if (ret)
7849                         return ret;
7850         }
7851
7852         switch (loop_mode) {
7853         case HNAE3_LOOP_APP:
7854                 ret = hclge_set_app_loopback(hdev, en);
7855                 break;
7856         case HNAE3_LOOP_SERIAL_SERDES:
7857         case HNAE3_LOOP_PARALLEL_SERDES:
7858                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7859                 break;
7860         case HNAE3_LOOP_PHY:
7861                 ret = hclge_set_phy_loopback(hdev, en);
7862                 break;
7863         default:
7864                 ret = -ENOTSUPP;
7865                 dev_err(&hdev->pdev->dev,
7866                         "loop_mode %d is not supported\n", loop_mode);
7867                 break;
7868         }
7869
7870         if (ret)
7871                 return ret;
7872
7873         ret = hclge_tqp_enable(handle, en);
7874         if (ret)
7875                 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7876                         en ? "enable" : "disable", ret);
7877
7878         return ret;
7879 }
7880
7881 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7882 {
7883         int ret;
7884
7885         ret = hclge_set_app_loopback(hdev, false);
7886         if (ret)
7887                 return ret;
7888
7889         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7890         if (ret)
7891                 return ret;
7892
7893         return hclge_cfg_common_loopback(hdev, false,
7894                                          HNAE3_LOOP_PARALLEL_SERDES);
7895 }
7896
7897 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7898 {
7899         struct hclge_vport *vport = hclge_get_vport(handle);
7900         struct hnae3_knic_private_info *kinfo;
7901         struct hnae3_queue *queue;
7902         struct hclge_tqp *tqp;
7903         int i;
7904
7905         kinfo = &vport->nic.kinfo;
7906         for (i = 0; i < kinfo->num_tqps; i++) {
7907                 queue = handle->kinfo.tqp[i];
7908                 tqp = container_of(queue, struct hclge_tqp, q);
7909                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7910         }
7911 }
7912
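/* Bounded busy loop: wait until any in-flight link status update in the
 * service task has completed or the service task has made progress.
 */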
7913 static void hclge_flush_link_update(struct hclge_dev *hdev)
7914 {
7915 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7916
7917         unsigned long last = hdev->serv_processed_cnt;
7918         int i = 0;
7919
7920         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7921                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7922                last == hdev->serv_processed_cnt)
7923                 usleep_range(1, 1);
7924 }
7925
7926 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7927 {
7928         struct hclge_vport *vport = hclge_get_vport(handle);
7929         struct hclge_dev *hdev = vport->back;
7930
7931         if (enable) {
7932                 hclge_task_schedule(hdev, 0);
7933         } else {
7934                 /* Set the DOWN flag here to disable link updating */
7935                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7936
7937                 /* flush memory to make sure DOWN is seen by service task */
7938                 smp_mb__before_atomic();
7939                 hclge_flush_link_update(hdev);
7940         }
7941 }
7942
7943 static int hclge_ae_start(struct hnae3_handle *handle)
7944 {
7945         struct hclge_vport *vport = hclge_get_vport(handle);
7946         struct hclge_dev *hdev = vport->back;
7947
7948         /* mac enable */
7949         hclge_cfg_mac_mode(hdev, true);
7950         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7951         hdev->hw.mac.link = 0;
7952
7953         /* reset tqp stats */
7954         hclge_reset_tqp_stats(handle);
7955
7956         hclge_mac_start_phy(hdev);
7957
7958         return 0;
7959 }
7960
7961 static void hclge_ae_stop(struct hnae3_handle *handle)
7962 {
7963         struct hclge_vport *vport = hclge_get_vport(handle);
7964         struct hclge_dev *hdev = vport->back;
7965
7966         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7967         spin_lock_bh(&hdev->fd_rule_lock);
7968         hclge_clear_arfs_rules(hdev);
7969         spin_unlock_bh(&hdev->fd_rule_lock);
7970
7971         /* If it is not PF reset, the firmware will disable the MAC,
7972          * so it only needs to stop the phy here.
7973          */
7974         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7975             hdev->reset_type != HNAE3_FUNC_RESET) {
7976                 hclge_mac_stop_phy(hdev);
7977                 hclge_update_link_status(hdev);
7978                 return;
7979         }
7980
7981         hclge_reset_tqp(handle);
7982
7983         hclge_config_mac_tnl_int(hdev, false);
7984
7985         /* Mac disable */
7986         hclge_cfg_mac_mode(hdev, false);
7987
7988         hclge_mac_stop_phy(hdev);
7989
7990         /* reset tqp stats */
7991         hclge_reset_tqp_stats(handle);
7992         hclge_update_link_status(hdev);
7993 }
7994
7995 int hclge_vport_start(struct hclge_vport *vport)
7996 {
7997         struct hclge_dev *hdev = vport->back;
7998
7999         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8000         vport->last_active_jiffies = jiffies;
8001
8002         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8003                 if (vport->vport_id) {
8004                         hclge_restore_mac_table_common(vport);
8005                         hclge_restore_vport_vlan_table(vport);
8006                 } else {
8007                         hclge_restore_hw_table(hdev);
8008                 }
8009         }
8010
8011         clear_bit(vport->vport_id, hdev->vport_config_block);
8012
8013         return 0;
8014 }
8015
8016 void hclge_vport_stop(struct hclge_vport *vport)
8017 {
8018         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8019 }
8020
8021 static int hclge_client_start(struct hnae3_handle *handle)
8022 {
8023         struct hclge_vport *vport = hclge_get_vport(handle);
8024
8025         return hclge_vport_start(vport);
8026 }
8027
8028 static void hclge_client_stop(struct hnae3_handle *handle)
8029 {
8030         struct hclge_vport *vport = hclge_get_vport(handle);
8031
8032         hclge_vport_stop(vport);
8033 }
8034
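/* Translate the MAC/VLAN table command response code into an errno:
 * overflow on add becomes -ENOSPC, a missing entry on remove/lookup
 * becomes -ENOENT, any undefined code becomes -EIO.
 */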
8035 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8036                                          u16 cmdq_resp, u8  resp_code,
8037                                          enum hclge_mac_vlan_tbl_opcode op)
8038 {
8039         struct hclge_dev *hdev = vport->back;
8040
8041         if (cmdq_resp) {
8042                 dev_err(&hdev->pdev->dev,
8043                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8044                         cmdq_resp);
8045                 return -EIO;
8046         }
8047
8048         if (op == HCLGE_MAC_VLAN_ADD) {
8049                 if (!resp_code || resp_code == 1)
8050                         return 0;
8051                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8052                          resp_code == HCLGE_ADD_MC_OVERFLOW)
8053                         return -ENOSPC;
8054
8055                 dev_err(&hdev->pdev->dev,
8056                         "add mac addr failed for undefined, code=%u.\n",
8057                         resp_code);
8058                 return -EIO;
8059         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8060                 if (!resp_code) {
8061                         return 0;
8062                 } else if (resp_code == 1) {
8063                         dev_dbg(&hdev->pdev->dev,
8064                                 "remove mac addr failed for miss.\n");
8065                         return -ENOENT;
8066                 }
8067
8068                 dev_err(&hdev->pdev->dev,
8069                         "remove mac addr failed for undefined, code=%u.\n",
8070                         resp_code);
8071                 return -EIO;
8072         } else if (op == HCLGE_MAC_VLAN_LKUP) {
8073                 if (!resp_code) {
8074                         return 0;
8075                 } else if (resp_code == 1) {
8076                         dev_dbg(&hdev->pdev->dev,
8077                                 "lookup mac addr failed for miss.\n");
8078                         return -ENOENT;
8079                 }
8080
8081                 dev_err(&hdev->pdev->dev,
8082                         "lookup mac addr failed for undefined, code=%u.\n",
8083                         resp_code);
8084                 return -EIO;
8085         }
8086
8087         dev_err(&hdev->pdev->dev,
8088                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8089
8090         return -EINVAL;
8091 }
8092
8093 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8094 {
8095 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8096
8097         unsigned int word_num;
8098         unsigned int bit_num;
8099
8100         if (vfid > 255 || vfid < 0)
8101                 return -EIO;
8102
8103         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8104                 word_num = vfid / 32;
8105                 bit_num  = vfid % 32;
8106                 if (clr)
8107                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8108                 else
8109                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8110         } else {
8111                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8112                 bit_num  = vfid % 32;
8113                 if (clr)
8114                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8115                 else
8116                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8117         }
8118
8119         return 0;
8120 }
8121
8122 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8123 {
8124 #define HCLGE_DESC_NUMBER 3
8125 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8126         int i, j;
8127
8128         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8129                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8130                         if (desc[i].data[j])
8131                                 return false;
8132
8133         return true;
8134 }
8135
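/* Pack the six MAC address bytes into the high/low words of a MAC/VLAN
 * table entry and set the multicast entry bits when requested.
 */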
8136 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8137                                    const u8 *addr, bool is_mc)
8138 {
8139         const unsigned char *mac_addr = addr;
8140         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8141                        (mac_addr[0]) | (mac_addr[1] << 8);
8142         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8143
8144         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8145         if (is_mc) {
8146                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8147                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8148         }
8149
8150         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8151         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8152 }
8153
8154 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8155                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8156 {
8157         struct hclge_dev *hdev = vport->back;
8158         struct hclge_desc desc;
8159         u8 resp_code;
8160         u16 retval;
8161         int ret;
8162
8163         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8164
8165         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8166
8167         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8168         if (ret) {
8169                 dev_err(&hdev->pdev->dev,
8170                         "del mac addr failed for cmd_send, ret =%d.\n",
8171                         ret);
8172                 return ret;
8173         }
8174         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8175         retval = le16_to_cpu(desc.retval);
8176
8177         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8178                                              HCLGE_MAC_VLAN_REMOVE);
8179 }
8180
8181 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8182                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8183                                      struct hclge_desc *desc,
8184                                      bool is_mc)
8185 {
8186         struct hclge_dev *hdev = vport->back;
8187         u8 resp_code;
8188         u16 retval;
8189         int ret;
8190
8191         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8192         if (is_mc) {
8193                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8194                 memcpy(desc[0].data,
8195                        req,
8196                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8197                 hclge_cmd_setup_basic_desc(&desc[1],
8198                                            HCLGE_OPC_MAC_VLAN_ADD,
8199                                            true);
8200                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8201                 hclge_cmd_setup_basic_desc(&desc[2],
8202                                            HCLGE_OPC_MAC_VLAN_ADD,
8203                                            true);
8204                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8205         } else {
8206                 memcpy(desc[0].data,
8207                        req,
8208                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8209                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8210         }
8211         if (ret) {
8212                 dev_err(&hdev->pdev->dev,
8213                         "lookup mac addr failed for cmd_send, ret =%d.\n",
8214                         ret);
8215                 return ret;
8216         }
8217         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8218         retval = le16_to_cpu(desc[0].retval);
8219
8220         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8221                                              HCLGE_MAC_VLAN_LKUP);
8222 }
8223
8224 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8225                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8226                                   struct hclge_desc *mc_desc)
8227 {
8228         struct hclge_dev *hdev = vport->back;
8229         int cfg_status;
8230         u8 resp_code;
8231         u16 retval;
8232         int ret;
8233
8234         if (!mc_desc) {
8235                 struct hclge_desc desc;
8236
8237                 hclge_cmd_setup_basic_desc(&desc,
8238                                            HCLGE_OPC_MAC_VLAN_ADD,
8239                                            false);
8240                 memcpy(desc.data, req,
8241                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8242                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8243                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8244                 retval = le16_to_cpu(desc.retval);
8245
8246                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8247                                                            resp_code,
8248                                                            HCLGE_MAC_VLAN_ADD);
8249         } else {
8250                 hclge_cmd_reuse_desc(&mc_desc[0], false);
8251                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8252                 hclge_cmd_reuse_desc(&mc_desc[1], false);
8253                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8254                 hclge_cmd_reuse_desc(&mc_desc[2], false);
8255                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8256                 memcpy(mc_desc[0].data, req,
8257                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8258                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8259                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8260                 retval = le16_to_cpu(mc_desc[0].retval);
8261
8262                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8263                                                            resp_code,
8264                                                            HCLGE_MAC_VLAN_ADD);
8265         }
8266
8267         if (ret) {
8268                 dev_err(&hdev->pdev->dev,
8269                         "add mac addr failed for cmd_send, ret =%d.\n",
8270                         ret);
8271                 return ret;
8272         }
8273
8274         return cfg_status;
8275 }
8276
8277 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8278                                u16 *allocated_size)
8279 {
8280         struct hclge_umv_spc_alc_cmd *req;
8281         struct hclge_desc desc;
8282         int ret;
8283
8284         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8285         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8286
8287         req->space_size = cpu_to_le32(space_size);
8288
8289         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8290         if (ret) {
8291                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8292                         ret);
8293                 return ret;
8294         }
8295
8296         *allocated_size = le32_to_cpu(desc.data[1]);
8297
8298         return 0;
8299 }
8300
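/* Request the wanted unicast MAC (UMV) table space from firmware and split
 * what was actually allocated into per-vport private quotas plus a shared
 * pool holding the remainder.
 */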
8301 static int hclge_init_umv_space(struct hclge_dev *hdev)
8302 {
8303         u16 allocated_size = 0;
8304         int ret;
8305
8306         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8307         if (ret)
8308                 return ret;
8309
8310         if (allocated_size < hdev->wanted_umv_size)
8311                 dev_warn(&hdev->pdev->dev,
8312                          "failed to alloc umv space, want %u, get %u\n",
8313                          hdev->wanted_umv_size, allocated_size);
8314
8315         hdev->max_umv_size = allocated_size;
8316         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8317         hdev->share_umv_size = hdev->priv_umv_size +
8318                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8319
8320         return 0;
8321 }
8322
8323 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8324 {
8325         struct hclge_vport *vport;
8326         int i;
8327
8328         for (i = 0; i < hdev->num_alloc_vport; i++) {
8329                 vport = &hdev->vport[i];
8330                 vport->used_umv_num = 0;
8331         }
8332
8333         mutex_lock(&hdev->vport_lock);
8334         hdev->share_umv_size = hdev->priv_umv_size +
8335                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8336         mutex_unlock(&hdev->vport_lock);
8337 }
8338
8339 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8340 {
8341         struct hclge_dev *hdev = vport->back;
8342         bool is_full;
8343
8344         if (need_lock)
8345                 mutex_lock(&hdev->vport_lock);
8346
8347         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8348                    hdev->share_umv_size == 0);
8349
8350         if (need_lock)
8351                 mutex_unlock(&hdev->vport_lock);
8352
8353         return is_full;
8354 }
8355
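/* hclge_update_umv_space - account for one umv entry being removed (is_free)
 * or added: entries beyond the vport's private quota are charged to, or given
 * back to, the shared pool. Called with hdev->vport_lock held.
 */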
8356 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8357 {
8358         struct hclge_dev *hdev = vport->back;
8359
8360         if (is_free) {
8361                 if (vport->used_umv_num > hdev->priv_umv_size)
8362                         hdev->share_umv_size++;
8363
8364                 if (vport->used_umv_num > 0)
8365                         vport->used_umv_num--;
8366         } else {
8367                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8368                     hdev->share_umv_size > 0)
8369                         hdev->share_umv_size--;
8370                 vport->used_umv_num++;
8371         }
8372 }
8373
8374 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8375                                                   const u8 *mac_addr)
8376 {
8377         struct hclge_mac_node *mac_node, *tmp;
8378
8379         list_for_each_entry_safe(mac_node, tmp, list, node)
8380                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8381                         return mac_node;
8382
8383         return NULL;
8384 }
8385
8386 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8387                                   enum HCLGE_MAC_NODE_STATE state)
8388 {
8389         switch (state) {
8390         /* from set_rx_mode or tmp_add_list */
8391         case HCLGE_MAC_TO_ADD:
8392                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8393                         mac_node->state = HCLGE_MAC_ACTIVE;
8394                 break;
8395         /* only from set_rx_mode */
8396         case HCLGE_MAC_TO_DEL:
8397                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8398                         list_del(&mac_node->node);
8399                         kfree(mac_node);
8400                 } else {
8401                         mac_node->state = HCLGE_MAC_TO_DEL;
8402                 }
8403                 break;
8404         /* only from tmp_add_list, so the mac_node->state won't be
8405          * ACTIVE.
8406          */
8407         case HCLGE_MAC_ACTIVE:
8408                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8409                         mac_node->state = HCLGE_MAC_ACTIVE;
8410
8411                 break;
8412         }
8413 }
8414
8415 int hclge_update_mac_list(struct hclge_vport *vport,
8416                           enum HCLGE_MAC_NODE_STATE state,
8417                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8418                           const unsigned char *addr)
8419 {
8420         struct hclge_dev *hdev = vport->back;
8421         struct hclge_mac_node *mac_node;
8422         struct list_head *list;
8423
8424         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8425                 &vport->uc_mac_list : &vport->mc_mac_list;
8426
8427         spin_lock_bh(&vport->mac_list_lock);
8428
8429         /* if the mac addr is already in the mac list, there is no need to
8430          * add a new node for it; just update the existing node's state:
8431          * convert it to a new state, remove it, or do nothing.
8432          */
8433         mac_node = hclge_find_mac_node(list, addr);
8434         if (mac_node) {
8435                 hclge_update_mac_node(mac_node, state);
8436                 spin_unlock_bh(&vport->mac_list_lock);
8437                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8438                 return 0;
8439         }
8440
8441         /* if this address has never been added, there is nothing to delete */
8442         if (state == HCLGE_MAC_TO_DEL) {
8443                 spin_unlock_bh(&vport->mac_list_lock);
8444                 dev_err(&hdev->pdev->dev,
8445                         "failed to delete address %pM from mac list\n",
8446                         addr);
8447                 return -ENOENT;
8448         }
8449
8450         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8451         if (!mac_node) {
8452                 spin_unlock_bh(&vport->mac_list_lock);
8453                 return -ENOMEM;
8454         }
8455
8456         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8457
8458         mac_node->state = state;
8459         ether_addr_copy(mac_node->mac_addr, addr);
8460         list_add_tail(&mac_node->node, list);
8461
8462         spin_unlock_bh(&vport->mac_list_lock);
8463
8464         return 0;
8465 }
8466
8467 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8468                              const unsigned char *addr)
8469 {
8470         struct hclge_vport *vport = hclge_get_vport(handle);
8471
8472         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8473                                      addr);
8474 }
8475
8476 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8477                              const unsigned char *addr)
8478 {
8479         struct hclge_dev *hdev = vport->back;
8480         struct hclge_mac_vlan_tbl_entry_cmd req;
8481         struct hclge_desc desc;
8482         u16 egress_port = 0;
8483         int ret;
8484
8485         /* mac addr check */
8486         if (is_zero_ether_addr(addr) ||
8487             is_broadcast_ether_addr(addr) ||
8488             is_multicast_ether_addr(addr)) {
8489                 dev_err(&hdev->pdev->dev,
8490                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8491                          addr, is_zero_ether_addr(addr),
8492                          is_broadcast_ether_addr(addr),
8493                          is_multicast_ether_addr(addr));
8494                 return -EINVAL;
8495         }
8496
8497         memset(&req, 0, sizeof(req));
8498
8499         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8500                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8501
8502         req.egress_port = cpu_to_le16(egress_port);
8503
8504         hclge_prepare_mac_addr(&req, addr, false);
8505
8506         /* Look up the mac address in the mac_vlan table, and add
8507          * it if the entry does not exist. Duplicate unicast entries
8508          * are not allowed in the mac vlan table.
8509          */
8510         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8511         if (ret == -ENOENT) {
8512                 mutex_lock(&hdev->vport_lock);
8513                 if (!hclge_is_umv_space_full(vport, false)) {
8514                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8515                         if (!ret)
8516                                 hclge_update_umv_space(vport, false);
8517                         mutex_unlock(&hdev->vport_lock);
8518                         return ret;
8519                 }
8520                 mutex_unlock(&hdev->vport_lock);
8521
8522                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8523                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8524                                 hdev->priv_umv_size);
8525
8526                 return -ENOSPC;
8527         }
8528
8529         /* check if we just hit the duplicate */
8530         if (!ret) {
8531                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8532                          vport->vport_id, addr);
8533                 return 0;
8534         }
8535
8536         dev_err(&hdev->pdev->dev,
8537                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8538                 addr);
8539
8540         return ret;
8541 }
8542
8543 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8544                             const unsigned char *addr)
8545 {
8546         struct hclge_vport *vport = hclge_get_vport(handle);
8547
8548         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8549                                      addr);
8550 }
8551
8552 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8553                             const unsigned char *addr)
8554 {
8555         struct hclge_dev *hdev = vport->back;
8556         struct hclge_mac_vlan_tbl_entry_cmd req;
8557         int ret;
8558
8559         /* mac addr check */
8560         if (is_zero_ether_addr(addr) ||
8561             is_broadcast_ether_addr(addr) ||
8562             is_multicast_ether_addr(addr)) {
8563                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8564                         addr);
8565                 return -EINVAL;
8566         }
8567
8568         memset(&req, 0, sizeof(req));
8569         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8570         hclge_prepare_mac_addr(&req, addr, false);
8571         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8572         if (!ret) {
8573                 mutex_lock(&hdev->vport_lock);
8574                 hclge_update_umv_space(vport, true);
8575                 mutex_unlock(&hdev->vport_lock);
8576         } else if (ret == -ENOENT) {
8577                 ret = 0;
8578         }
8579
8580         return ret;
8581 }
8582
8583 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8584                              const unsigned char *addr)
8585 {
8586         struct hclge_vport *vport = hclge_get_vport(handle);
8587
8588         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8589                                      addr);
8590 }
8591
8592 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8593                              const unsigned char *addr)
8594 {
8595         struct hclge_dev *hdev = vport->back;
8596         struct hclge_mac_vlan_tbl_entry_cmd req;
8597         struct hclge_desc desc[3];
8598         int status;
8599
8600         /* mac addr check */
8601         if (!is_multicast_ether_addr(addr)) {
8602                 dev_err(&hdev->pdev->dev,
8603                         "Add mc mac err! invalid mac:%pM.\n",
8604                          addr);
8605                 return -EINVAL;
8606         }
8607         memset(&req, 0, sizeof(req));
8608         hclge_prepare_mac_addr(&req, addr, true);
8609         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8610         if (status) {
8611                 /* This mac addr does not exist, add a new entry for it */
8612                 memset(desc[0].data, 0, sizeof(desc[0].data));
8613                 memset(desc[1].data, 0, sizeof(desc[0].data));
8614                 memset(desc[2].data, 0, sizeof(desc[0].data));
8615         }
8616         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8617         if (status)
8618                 return status;
8619         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8620         /* if the table has already overflowed, do not print each time */
8621         if (status == -ENOSPC &&
8622             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8623                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8624
8625         return status;
8626 }
8627
8628 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8629                             const unsigned char *addr)
8630 {
8631         struct hclge_vport *vport = hclge_get_vport(handle);
8632
8633         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8634                                      addr);
8635 }
8636
8637 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8638                             const unsigned char *addr)
8639 {
8640         struct hclge_dev *hdev = vport->back;
8641         struct hclge_mac_vlan_tbl_entry_cmd req;
8642         enum hclge_cmd_status status;
8643         struct hclge_desc desc[3];
8644
8645         /* mac addr check */
8646         if (!is_multicast_ether_addr(addr)) {
8647                 dev_dbg(&hdev->pdev->dev,
8648                         "Remove mc mac err! invalid mac:%pM.\n",
8649                          addr);
8650                 return -EINVAL;
8651         }
8652
8653         memset(&req, 0, sizeof(req));
8654         hclge_prepare_mac_addr(&req, addr, true);
8655         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8656         if (!status) {
8657                 /* This mac addr exists, remove this handle's VFID from it */
8658                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8659                 if (status)
8660                         return status;
8661
8662                 if (hclge_is_all_function_id_zero(desc))
8663                         /* All the vfids are zero, so delete this entry */
8664                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8665                 else
8666                         /* Not all the vfids are zero, update the vfids */
8667                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8668         } else if (status == -ENOENT) {
8669                 status = 0;
8670         }
8671
8672         return status;
8673 }
8674
8675 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8676                                       struct list_head *list,
8677                                       int (*sync)(struct hclge_vport *,
8678                                                   const unsigned char *))
8679 {
8680         struct hclge_mac_node *mac_node, *tmp;
8681         int ret;
8682
8683         list_for_each_entry_safe(mac_node, tmp, list, node) {
8684                 ret = sync(vport, mac_node->mac_addr);
8685                 if (!ret) {
8686                         mac_node->state = HCLGE_MAC_ACTIVE;
8687                 } else {
8688                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8689                                 &vport->state);
8690                         break;
8691                 }
8692         }
8693 }
8694
8695 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8696                                         struct list_head *list,
8697                                         int (*unsync)(struct hclge_vport *,
8698                                                       const unsigned char *))
8699 {
8700         struct hclge_mac_node *mac_node, *tmp;
8701         int ret;
8702
8703         list_for_each_entry_safe(mac_node, tmp, list, node) {
8704                 ret = unsync(vport, mac_node->mac_addr);
8705                 if (!ret || ret == -ENOENT) {
8706                         list_del(&mac_node->node);
8707                         kfree(mac_node);
8708                 } else {
8709                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8710                                 &vport->state);
8711                         break;
8712                 }
8713         }
8714 }
8715
8716 static bool hclge_sync_from_add_list(struct list_head *add_list,
8717                                      struct list_head *mac_list)
8718 {
8719         struct hclge_mac_node *mac_node, *tmp, *new_node;
8720         bool all_added = true;
8721
8722         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8723                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8724                         all_added = false;
8725
8726                 /* if the mac address from tmp_add_list is not in the
8727                  * uc/mc_mac_list, it means a TO_DEL request was received
8728                  * during the time window of adding the mac address into the
8729                  * mac table. If the mac_node state is ACTIVE, change it to
8730                  * TO_DEL so it will be removed next time; otherwise it must
8731                  * be TO_ADD, meaning this address has not been added into
8732                  * the mac table yet, so just remove the mac node.
8733                  */
8734                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8735                 if (new_node) {
8736                         hclge_update_mac_node(new_node, mac_node->state);
8737                         list_del(&mac_node->node);
8738                         kfree(mac_node);
8739                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8740                         mac_node->state = HCLGE_MAC_TO_DEL;
8741                         list_del(&mac_node->node);
8742                         list_add_tail(&mac_node->node, mac_list);
8743                 } else {
8744                         list_del(&mac_node->node);
8745                         kfree(mac_node);
8746                 }
8747         }
8748
8749         return all_added;
8750 }
8751
8752 static void hclge_sync_from_del_list(struct list_head *del_list,
8753                                      struct list_head *mac_list)
8754 {
8755         struct hclge_mac_node *mac_node, *tmp, *new_node;
8756
8757         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8758                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8759                 if (new_node) {
8760                         /* If the mac addr exists in the mac list, it means
8761                          * a new TO_ADD request was received during the time
8762                          * window of configuring the mac address. Since the
8763                          * mac node state is TO_ADD and the address is still
8764                          * in the hardware (because the delete failed), we
8765                          * just need to change the mac node state to ACTIVE.
8766                          */
8767                         new_node->state = HCLGE_MAC_ACTIVE;
8768                         list_del(&mac_node->node);
8769                         kfree(mac_node);
8770                 } else {
8771                         list_del(&mac_node->node);
8772                         list_add_tail(&mac_node->node, mac_list);
8773                 }
8774         }
8775 }
8776
8777 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8778                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8779                                         bool is_all_added)
8780 {
8781         if (mac_type == HCLGE_MAC_ADDR_UC) {
8782                 if (is_all_added)
8783                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8784                 else
8785                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8786         } else {
8787                 if (is_all_added)
8788                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8789                 else
8790                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8791         }
8792 }
8793
8794 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8795                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8796 {
8797         struct hclge_mac_node *mac_node, *tmp, *new_node;
8798         struct list_head tmp_add_list, tmp_del_list;
8799         struct list_head *list;
8800         bool all_added;
8801
8802         INIT_LIST_HEAD(&tmp_add_list);
8803         INIT_LIST_HEAD(&tmp_del_list);
8804
8805         /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8806          * that we can add/delete them outside the spin lock
8807          */
8808         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8809                 &vport->uc_mac_list : &vport->mc_mac_list;
8810
8811         spin_lock_bh(&vport->mac_list_lock);
8812
8813         list_for_each_entry_safe(mac_node, tmp, list, node) {
8814                 switch (mac_node->state) {
8815                 case HCLGE_MAC_TO_DEL:
8816                         list_del(&mac_node->node);
8817                         list_add_tail(&mac_node->node, &tmp_del_list);
8818                         break;
8819                 case HCLGE_MAC_TO_ADD:
8820                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8821                         if (!new_node)
8822                                 goto stop_traverse;
8823                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8824                         new_node->state = mac_node->state;
8825                         list_add_tail(&new_node->node, &tmp_add_list);
8826                         break;
8827                 default:
8828                         break;
8829                 }
8830         }
8831
8832 stop_traverse:
8833         spin_unlock_bh(&vport->mac_list_lock);
8834
8835         /* delete first, in order to get max mac table space for adding */
8836         if (mac_type == HCLGE_MAC_ADDR_UC) {
8837                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8838                                             hclge_rm_uc_addr_common);
8839                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8840                                           hclge_add_uc_addr_common);
8841         } else {
8842                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8843                                             hclge_rm_mc_addr_common);
8844                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8845                                           hclge_add_mc_addr_common);
8846         }
8847
8848         /* if some mac addresses failed to be added/deleted, move them back
8849          * to the mac_list and retry next time.
8850          */
8851         spin_lock_bh(&vport->mac_list_lock);
8852
8853         hclge_sync_from_del_list(&tmp_del_list, list);
8854         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8855
8856         spin_unlock_bh(&vport->mac_list_lock);
8857
8858         hclge_update_overflow_flags(vport, mac_type, all_added);
8859 }
8860
8861 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8862 {
8863         struct hclge_dev *hdev = vport->back;
8864
8865         if (test_bit(vport->vport_id, hdev->vport_config_block))
8866                 return false;
8867
8868         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8869                 return true;
8870
8871         return false;
8872 }
8873
8874 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8875 {
8876         int i;
8877
8878         for (i = 0; i < hdev->num_alloc_vport; i++) {
8879                 struct hclge_vport *vport = &hdev->vport[i];
8880
8881                 if (!hclge_need_sync_mac_table(vport))
8882                         continue;
8883
8884                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8885                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8886         }
8887 }
8888
8889 static void hclge_build_del_list(struct list_head *list,
8890                                  bool is_del_list,
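/* hclge_build_del_list - move mac nodes already programmed in hardware
 * (TO_DEL or ACTIVE) onto tmp_del_list; nodes still pending addition
 * (TO_ADD) are simply freed when the whole list is being deleted.
 */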
8891                                  struct list_head *tmp_del_list)
8892 {
8893         struct hclge_mac_node *mac_cfg, *tmp;
8894
8895         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8896                 switch (mac_cfg->state) {
8897                 case HCLGE_MAC_TO_DEL:
8898                 case HCLGE_MAC_ACTIVE:
8899                         list_del(&mac_cfg->node);
8900                         list_add_tail(&mac_cfg->node, tmp_del_list);
8901                         break;
8902                 case HCLGE_MAC_TO_ADD:
8903                         if (is_del_list) {
8904                                 list_del(&mac_cfg->node);
8905                                 kfree(mac_cfg);
8906                         }
8907                         break;
8908                 }
8909         }
8910 }
8911
8912 static void hclge_unsync_del_list(struct hclge_vport *vport,
8913                                   int (*unsync)(struct hclge_vport *vport,
8914                                                 const unsigned char *addr),
8915                                   bool is_del_list,
8916                                   struct list_head *tmp_del_list)
8917 {
8918         struct hclge_mac_node *mac_cfg, *tmp;
8919         int ret;
8920
8921         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8922                 ret = unsync(vport, mac_cfg->mac_addr);
8923                 if (!ret || ret == -ENOENT) {
8924                         /* clear all mac addrs from hardware, but keep these
8925                          * mac addrs in the mac list, and restore them after
8926                          * the vf reset finishes.
8927                          */
8928                         if (!is_del_list &&
8929                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8930                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8931                         } else {
8932                                 list_del(&mac_cfg->node);
8933                                 kfree(mac_cfg);
8934                         }
8935                 } else if (is_del_list) {
8936                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8937                 }
8938         }
8939 }
8940
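/* hclge_rm_vport_all_mac_table - remove every uc or mc mac address of a vport
 * from the hardware table. When is_del_list is false, entries already in
 * hardware are kept in the software list in TO_ADD state so they can be
 * restored once the vport's reset completes.
 */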
8941 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8942                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8943 {
8944         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8945         struct hclge_dev *hdev = vport->back;
8946         struct list_head tmp_del_list, *list;
8947
8948         if (mac_type == HCLGE_MAC_ADDR_UC) {
8949                 list = &vport->uc_mac_list;
8950                 unsync = hclge_rm_uc_addr_common;
8951         } else {
8952                 list = &vport->mc_mac_list;
8953                 unsync = hclge_rm_mc_addr_common;
8954         }
8955
8956         INIT_LIST_HEAD(&tmp_del_list);
8957
8958         if (!is_del_list)
8959                 set_bit(vport->vport_id, hdev->vport_config_block);
8960
8961         spin_lock_bh(&vport->mac_list_lock);
8962
8963         hclge_build_del_list(list, is_del_list, &tmp_del_list);
8964
8965         spin_unlock_bh(&vport->mac_list_lock);
8966
8967         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8968
8969         spin_lock_bh(&vport->mac_list_lock);
8970
8971         hclge_sync_from_del_list(&tmp_del_list, list);
8972
8973         spin_unlock_bh(&vport->mac_list_lock);
8974 }
8975
8976 /* remove all mac addresses when uninitializing */
8977 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8978                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8979 {
8980         struct hclge_mac_node *mac_node, *tmp;
8981         struct hclge_dev *hdev = vport->back;
8982         struct list_head tmp_del_list, *list;
8983
8984         INIT_LIST_HEAD(&tmp_del_list);
8985
8986         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8987                 &vport->uc_mac_list : &vport->mc_mac_list;
8988
8989         spin_lock_bh(&vport->mac_list_lock);
8990
8991         list_for_each_entry_safe(mac_node, tmp, list, node) {
8992                 switch (mac_node->state) {
8993                 case HCLGE_MAC_TO_DEL:
8994                 case HCLGE_MAC_ACTIVE:
8995                         list_del(&mac_node->node);
8996                         list_add_tail(&mac_node->node, &tmp_del_list);
8997                         break;
8998                 case HCLGE_MAC_TO_ADD:
8999                         list_del(&mac_node->node);
9000                         kfree(mac_node);
9001                         break;
9002                 }
9003         }
9004
9005         spin_unlock_bh(&vport->mac_list_lock);
9006
9007         if (mac_type == HCLGE_MAC_ADDR_UC)
9008                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9009                                             hclge_rm_uc_addr_common);
9010         else
9011                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9012                                             hclge_rm_mc_addr_common);
9013
9014         if (!list_empty(&tmp_del_list))
9015                 dev_warn(&hdev->pdev->dev,
9016                          "uninit %s mac list for vport %u not completely.\n",
9017                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9018                          vport->vport_id);
9019
9020         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9021                 list_del(&mac_node->node);
9022                 kfree(mac_node);
9023         }
9024 }
9025
9026 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9027 {
9028         struct hclge_vport *vport;
9029         int i;
9030
9031         for (i = 0; i < hdev->num_alloc_vport; i++) {
9032                 vport = &hdev->vport[i];
9033                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9034                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9035         }
9036 }
9037
9038 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9039                                               u16 cmdq_resp, u8 resp_code)
9040 {
9041 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
9042 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
9043 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
9044 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
9045
9046         int return_status;
9047
9048         if (cmdq_resp) {
9049                 dev_err(&hdev->pdev->dev,
9050                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9051                         cmdq_resp);
9052                 return -EIO;
9053         }
9054
9055         switch (resp_code) {
9056         case HCLGE_ETHERTYPE_SUCCESS_ADD:
9057         case HCLGE_ETHERTYPE_ALREADY_ADD:
9058                 return_status = 0;
9059                 break;
9060         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9061                 dev_err(&hdev->pdev->dev,
9062                         "add mac ethertype failed for manager table overflow.\n");
9063                 return_status = -EIO;
9064                 break;
9065         case HCLGE_ETHERTYPE_KEY_CONFLICT:
9066                 dev_err(&hdev->pdev->dev,
9067                         "add mac ethertype failed for key conflict.\n");
9068                 return_status = -EIO;
9069                 break;
9070         default:
9071                 dev_err(&hdev->pdev->dev,
9072                         "add mac ethertype failed for undefined, code=%u.\n",
9073                         resp_code);
9074                 return_status = -EIO;
9075         }
9076
9077         return return_status;
9078 }
9079
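/* hclge_check_vf_mac_exist - return true if mac_addr is already programmed in
 * the hardware mac_vlan table or is assigned to another VF.
 */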
9080 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9081                                      u8 *mac_addr)
9082 {
9083         struct hclge_mac_vlan_tbl_entry_cmd req;
9084         struct hclge_dev *hdev = vport->back;
9085         struct hclge_desc desc;
9086         u16 egress_port = 0;
9087         int i;
9088
9089         if (is_zero_ether_addr(mac_addr))
9090                 return false;
9091
9092         memset(&req, 0, sizeof(req));
9093         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9094                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9095         req.egress_port = cpu_to_le16(egress_port);
9096         hclge_prepare_mac_addr(&req, mac_addr, false);
9097
9098         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9099                 return true;
9100
9101         vf_idx += HCLGE_VF_VPORT_START_NUM;
9102         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9103                 if (i != vf_idx &&
9104                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9105                         return true;
9106
9107         return false;
9108 }
9109
9110 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9111                             u8 *mac_addr)
9112 {
9113         struct hclge_vport *vport = hclge_get_vport(handle);
9114         struct hclge_dev *hdev = vport->back;
9115
9116         vport = hclge_get_vf_vport(hdev, vf);
9117         if (!vport)
9118                 return -EINVAL;
9119
9120         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9121                 dev_info(&hdev->pdev->dev,
9122                          "Specified MAC(=%pM) is same as before, no change committed!\n",
9123                          mac_addr);
9124                 return 0;
9125         }
9126
9127         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9128                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9129                         mac_addr);
9130                 return -EEXIST;
9131         }
9132
9133         ether_addr_copy(vport->vf_info.mac, mac_addr);
9134
9135         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9136                 dev_info(&hdev->pdev->dev,
9137                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9138                          vf, mac_addr);
9139                 return hclge_inform_reset_assert_to_vf(vport);
9140         }
9141
9142         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9143                  vf, mac_addr);
9144         return 0;
9145 }
9146
9147 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9148                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
9149 {
9150         struct hclge_desc desc;
9151         u8 resp_code;
9152         u16 retval;
9153         int ret;
9154
9155         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9156         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9157
9158         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9159         if (ret) {
9160                 dev_err(&hdev->pdev->dev,
9161                         "add mac ethertype failed for cmd_send, ret =%d.\n",
9162                         ret);
9163                 return ret;
9164         }
9165
9166         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9167         retval = le16_to_cpu(desc.retval);
9168
9169         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9170 }
9171
9172 static int init_mgr_tbl(struct hclge_dev *hdev)
9173 {
9174         int ret;
9175         int i;
9176
9177         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9178                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9179                 if (ret) {
9180                         dev_err(&hdev->pdev->dev,
9181                                 "add mac ethertype failed, ret =%d.\n",
9182                                 ret);
9183                         return ret;
9184                 }
9185         }
9186
9187         return 0;
9188 }
9189
9190 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9191 {
9192         struct hclge_vport *vport = hclge_get_vport(handle);
9193         struct hclge_dev *hdev = vport->back;
9194
9195         ether_addr_copy(p, hdev->hw.mac.mac_addr);
9196 }
9197
9198 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9199                                        const u8 *old_addr, const u8 *new_addr)
9200 {
9201         struct list_head *list = &vport->uc_mac_list;
9202         struct hclge_mac_node *old_node, *new_node;
9203
9204         new_node = hclge_find_mac_node(list, new_addr);
9205         if (!new_node) {
9206                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9207                 if (!new_node)
9208                         return -ENOMEM;
9209
9210                 new_node->state = HCLGE_MAC_TO_ADD;
9211                 ether_addr_copy(new_node->mac_addr, new_addr);
9212                 list_add(&new_node->node, list);
9213         } else {
9214                 if (new_node->state == HCLGE_MAC_TO_DEL)
9215                         new_node->state = HCLGE_MAC_ACTIVE;
9216
9217                 /* make sure the new addr is at the list head, otherwise the
9218                  * dev addr may not be re-added into the mac table due to the
9219                  * umv space limitation after a global/imp reset, which clears
9220                  * the mac table in hardware.
9221                  */
9222                 list_move(&new_node->node, list);
9223         }
9224
9225         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9226                 old_node = hclge_find_mac_node(list, old_addr);
9227                 if (old_node) {
9228                         if (old_node->state == HCLGE_MAC_TO_ADD) {
9229                                 list_del(&old_node->node);
9230                                 kfree(old_node);
9231                         } else {
9232                                 old_node->state = HCLGE_MAC_TO_DEL;
9233                         }
9234                 }
9235         }
9236
9237         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9238
9239         return 0;
9240 }
9241
9242 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9243                               bool is_first)
9244 {
9245         const unsigned char *new_addr = (const unsigned char *)p;
9246         struct hclge_vport *vport = hclge_get_vport(handle);
9247         struct hclge_dev *hdev = vport->back;
9248         unsigned char *old_addr = NULL;
9249         int ret;
9250
9251         /* mac addr check */
9252         if (is_zero_ether_addr(new_addr) ||
9253             is_broadcast_ether_addr(new_addr) ||
9254             is_multicast_ether_addr(new_addr)) {
9255                 dev_err(&hdev->pdev->dev,
9256                         "change uc mac err! invalid mac: %pM.\n",
9257                          new_addr);
9258                 return -EINVAL;
9259         }
9260
9261         ret = hclge_pause_addr_cfg(hdev, new_addr);
9262         if (ret) {
9263                 dev_err(&hdev->pdev->dev,
9264                         "failed to configure mac pause address, ret = %d\n",
9265                         ret);
9266                 return ret;
9267         }
9268
9269         if (!is_first)
9270                 old_addr = hdev->hw.mac.mac_addr;
9271
9272         spin_lock_bh(&vport->mac_list_lock);
9273         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9274         if (ret) {
9275                 dev_err(&hdev->pdev->dev,
9276                         "failed to change the mac addr:%pM, ret = %d\n",
9277                         new_addr, ret);
9278                 spin_unlock_bh(&vport->mac_list_lock);
9279
9280                 if (!is_first)
9281                         hclge_pause_addr_cfg(hdev, old_addr);
9282
9283                 return ret;
9284         }
9285         /* we must update the dev addr under spin lock protection, to prevent
9286          * it from being removed by the set_rx_mode path.
9287          */
9288         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9289         spin_unlock_bh(&vport->mac_list_lock);
9290
9291         hclge_task_schedule(hdev, 0);
9292
9293         return 0;
9294 }
9295
9296 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9297 {
9298         struct mii_ioctl_data *data = if_mii(ifr);
9299
9300         if (!hnae3_dev_phy_imp_supported(hdev))
9301                 return -EOPNOTSUPP;
9302
9303         switch (cmd) {
9304         case SIOCGMIIPHY:
9305                 data->phy_id = hdev->hw.mac.phy_addr;
9306                 /* this command reads phy id and register at the same time */
9307                 fallthrough;
9308         case SIOCGMIIREG:
9309                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9310                 return 0;
9311
9312         case SIOCSMIIREG:
9313                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9314         default:
9315                 return -EOPNOTSUPP;
9316         }
9317 }
9318
9319 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9320                           int cmd)
9321 {
9322         struct hclge_vport *vport = hclge_get_vport(handle);
9323         struct hclge_dev *hdev = vport->back;
9324
9325         if (!hdev->hw.mac.phydev)
9326                 return hclge_mii_ioctl(hdev, ifr, cmd);
9327
9328         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9329 }
9330
9331 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9332                                       u8 fe_type, bool filter_en, u8 vf_id)
9333 {
9334         struct hclge_vlan_filter_ctrl_cmd *req;
9335         struct hclge_desc desc;
9336         int ret;
9337
9338         /* read current vlan filter parameter */
9339         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9340         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9341         req->vlan_type = vlan_type;
9342         req->vf_id = vf_id;
9343
9344         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9345         if (ret) {
9346                 dev_err(&hdev->pdev->dev,
9347                         "failed to get vlan filter config, ret = %d.\n", ret);
9348                 return ret;
9349         }
9350
9351         /* modify and write new config parameter */
9352         hclge_cmd_reuse_desc(&desc, false);
9353         req->vlan_fe = filter_en ?
9354                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9355
9356         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9357         if (ret)
9358                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9359                         ret);
9360
9361         return ret;
9362 }
9363
9364 #define HCLGE_FILTER_TYPE_VF            0
9365 #define HCLGE_FILTER_TYPE_PORT          1
9366 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
9367 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
9368 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
9369 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
9370 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
9371 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
9372                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
9373 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
9374                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
9375
9376 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9377 {
9378         struct hclge_vport *vport = hclge_get_vport(handle);
9379         struct hclge_dev *hdev = vport->back;
9380
9381         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9382                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9383                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
9384                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9385                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
9386         } else {
9387                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9388                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9389                                            0);
9390         }
9391         if (enable)
9392                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
9393         else
9394                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9395 }
9396
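/* hclge_set_vf_vlan_filter_cmd - build and send the two-descriptor
 * HCLGE_OPC_VLAN_FILTER_VF_CFG command; the target vf is selected by setting
 * a single bit in the vf bitmap that spans both descriptors.
 */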
9397 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9398                                         bool is_kill, u16 vlan,
9399                                         struct hclge_desc *desc)
9400 {
9401         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9402         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9403         u8 vf_byte_val;
9404         u8 vf_byte_off;
9405         int ret;
9406
9407         hclge_cmd_setup_basic_desc(&desc[0],
9408                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9409         hclge_cmd_setup_basic_desc(&desc[1],
9410                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9411
9412         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9413
9414         vf_byte_off = vfid / 8;
9415         vf_byte_val = 1 << (vfid % 8);
9416
9417         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9418         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9419
9420         req0->vlan_id  = cpu_to_le16(vlan);
9421         req0->vlan_cfg = is_kill;
9422
9423         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9424                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9425         else
9426                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9427
9428         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9429         if (ret) {
9430                 dev_err(&hdev->pdev->dev,
9431                         "Send vf vlan command fail, ret =%d.\n",
9432                         ret);
9433                 return ret;
9434         }
9435
9436         return 0;
9437 }
9438
9439 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9440                                           bool is_kill, struct hclge_desc *desc)
9441 {
9442         struct hclge_vlan_filter_vf_cfg_cmd *req;
9443
9444         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9445
9446         if (!is_kill) {
9447 #define HCLGE_VF_VLAN_NO_ENTRY  2
9448                 if (!req->resp_code || req->resp_code == 1)
9449                         return 0;
9450
9451                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9452                         set_bit(vfid, hdev->vf_vlan_full);
9453                         dev_warn(&hdev->pdev->dev,
9454                                  "vf vlan table is full, vf vlan filter is disabled\n");
9455                         return 0;
9456                 }
9457
9458                 dev_err(&hdev->pdev->dev,
9459                         "Add vf vlan filter fail, ret =%u.\n",
9460                         req->resp_code);
9461         } else {
9462 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9463                 if (!req->resp_code)
9464                         return 0;
9465
9466                 /* the vf vlan filter is disabled when the vf vlan table is
9467                  * full, so new vlan ids are not added into the vf vlan table.
9468                  * Just return 0 without warning, to avoid massive verbose
9469                  * logs on unload.
9470                  */
9471                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9472                         return 0;
9473
9474                 dev_err(&hdev->pdev->dev,
9475                         "Kill vf vlan filter fail, ret =%u.\n",
9476                         req->resp_code);
9477         }
9478
9479         return -EIO;
9480 }
9481
9482 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9483                                     bool is_kill, u16 vlan)
9484 {
9485         struct hclge_vport *vport = &hdev->vport[vfid];
9486         struct hclge_desc desc[2];
9487         int ret;
9488
9489         /* if the vf vlan table is full, firmware closes the vf vlan filter,
9490          * so it is neither possible nor necessary to add new vlan ids to it.
9491          * If spoof check is enabled and the vf vlan table is full, do not add
9492          * a new vlan, because tx packets with this vlan id would be dropped.
9493          */
9494         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9495                 if (vport->vf_info.spoofchk && vlan) {
9496                         dev_err(&hdev->pdev->dev,
9497                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9498                         return -EPERM;
9499                 }
9500                 return 0;
9501         }
9502
9503         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9504         if (ret)
9505                 return ret;
9506
9507         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9508 }
9509
9510 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9511                                       u16 vlan_id, bool is_kill)
9512 {
9513         struct hclge_vlan_filter_pf_cfg_cmd *req;
9514         struct hclge_desc desc;
9515         u8 vlan_offset_byte_val;
9516         u8 vlan_offset_byte;
9517         u8 vlan_offset_160;
9518         int ret;
9519
9520         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9521
9522         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9523         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9524                            HCLGE_VLAN_BYTE_SIZE;
9525         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9526
9527         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9528         req->vlan_offset = vlan_offset_160;
9529         req->vlan_cfg = is_kill;
9530         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9531
9532         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9533         if (ret)
9534                 dev_err(&hdev->pdev->dev,
9535                         "port vlan command, send fail, ret =%d.\n", ret);
9536         return ret;
9537 }
9538
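/* hclge_set_vlan_filter_hw - update both the per-vf and the port vlan filter
 * tables. The port-level entry is only touched when the first vport joins a
 * vlan or the last vport leaves it, tracked via hdev->vlan_table.
 */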
9539 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9540                                     u16 vport_id, u16 vlan_id,
9541                                     bool is_kill)
9542 {
9543         u16 vport_idx, vport_num = 0;
9544         int ret;
9545
9546         if (is_kill && !vlan_id)
9547                 return 0;
9548
9549         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9550         if (ret) {
9551                 dev_err(&hdev->pdev->dev,
9552                         "Set %u vport vlan filter config fail, ret =%d.\n",
9553                         vport_id, ret);
9554                 return ret;
9555         }
9556
9557         /* vlan 0 may be added twice when 8021q module is enabled */
9558         if (!is_kill && !vlan_id &&
9559             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9560                 return 0;
9561
9562         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9563                 dev_err(&hdev->pdev->dev,
9564                         "Add port vlan failed, vport %u is already in vlan %u\n",
9565                         vport_id, vlan_id);
9566                 return -EINVAL;
9567         }
9568
9569         if (is_kill &&
9570             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9571                 dev_err(&hdev->pdev->dev,
9572                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9573                         vport_id, vlan_id);
9574                 return -EINVAL;
9575         }
9576
9577         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9578                 vport_num++;
9579
9580         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9581                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9582                                                  is_kill);
9583
9584         return ret;
9585 }
9586
9587 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9588 {
9589         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9590         struct hclge_vport_vtag_tx_cfg_cmd *req;
9591         struct hclge_dev *hdev = vport->back;
9592         struct hclge_desc desc;
9593         u16 bmap_index;
9594         int status;
9595
9596         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9597
9598         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9599         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9600         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9601         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9602                       vcfg->accept_tag1 ? 1 : 0);
9603         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9604                       vcfg->accept_untag1 ? 1 : 0);
9605         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9606                       vcfg->accept_tag2 ? 1 : 0);
9607         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9608                       vcfg->accept_untag2 ? 1 : 0);
9609         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9610                       vcfg->insert_tag1_en ? 1 : 0);
9611         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9612                       vcfg->insert_tag2_en ? 1 : 0);
9613         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9614                       vcfg->tag_shift_mode_en ? 1 : 0);
9615         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9616
9617         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9618         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9619                         HCLGE_VF_NUM_PER_BYTE;
9620         req->vf_bitmap[bmap_index] =
9621                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9622
9623         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9624         if (status)
9625                 dev_err(&hdev->pdev->dev,
9626                         "Send port txvlan cfg command fail, ret =%d\n",
9627                         status);
9628
9629         return status;
9630 }
9631
9632 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9633 {
9634         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9635         struct hclge_vport_vtag_rx_cfg_cmd *req;
9636         struct hclge_dev *hdev = vport->back;
9637         struct hclge_desc desc;
9638         u16 bmap_index;
9639         int status;
9640
9641         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9642
9643         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9644         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9645                       vcfg->strip_tag1_en ? 1 : 0);
9646         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9647                       vcfg->strip_tag2_en ? 1 : 0);
9648         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9649                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9650         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9651                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9652         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9653                       vcfg->strip_tag1_discard_en ? 1 : 0);
9654         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9655                       vcfg->strip_tag2_discard_en ? 1 : 0);
9656
9657         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9658         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9659                         HCLGE_VF_NUM_PER_BYTE;
9660         req->vf_bitmap[bmap_index] =
9661                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9662
9663         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9664         if (status)
9665                 dev_err(&hdev->pdev->dev,
9666                         "Send port rxvlan cfg command fail, ret =%d\n",
9667                         status);
9668
9669         return status;
9670 }
9671
9672 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9673                                   u16 port_base_vlan_state,
9674                                   u16 vlan_tag)
9675 {
9676         int ret;
9677
9678         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9679                 vport->txvlan_cfg.accept_tag1 = true;
9680                 vport->txvlan_cfg.insert_tag1_en = false;
9681                 vport->txvlan_cfg.default_tag1 = 0;
9682         } else {
9683                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9684
9685                 vport->txvlan_cfg.accept_tag1 =
9686                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9687                 vport->txvlan_cfg.insert_tag1_en = true;
9688                 vport->txvlan_cfg.default_tag1 = vlan_tag;
9689         }
9690
9691         vport->txvlan_cfg.accept_untag1 = true;
9692
9693         /* accept_tag2 and accept_untag2 are not supported on
9694          * pdev revision(0x20); newer revisions support them, but
9695          * these two fields cannot be configured by the user.
9696          */
9697         vport->txvlan_cfg.accept_tag2 = true;
9698         vport->txvlan_cfg.accept_untag2 = true;
9699         vport->txvlan_cfg.insert_tag2_en = false;
9700         vport->txvlan_cfg.default_tag2 = 0;
9701         vport->txvlan_cfg.tag_shift_mode_en = true;
9702
9703         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9704                 vport->rxvlan_cfg.strip_tag1_en = false;
9705                 vport->rxvlan_cfg.strip_tag2_en =
9706                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9707                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9708         } else {
9709                 vport->rxvlan_cfg.strip_tag1_en =
9710                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9711                 vport->rxvlan_cfg.strip_tag2_en = true;
9712                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9713         }
9714
9715         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9716         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9717         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9718
9719         ret = hclge_set_vlan_tx_offload_cfg(vport);
9720         if (ret)
9721                 return ret;
9722
9723         return hclge_set_vlan_rx_offload_cfg(vport);
9724 }
9725
9726 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9727 {
9728         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9729         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9730         struct hclge_desc desc;
9731         int status;
9732
9733         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9734         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9735         rx_req->ot_fst_vlan_type =
9736                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9737         rx_req->ot_sec_vlan_type =
9738                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9739         rx_req->in_fst_vlan_type =
9740                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9741         rx_req->in_sec_vlan_type =
9742                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9743
9744         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9745         if (status) {
9746                 dev_err(&hdev->pdev->dev,
9747                         "Send rxvlan protocol type command fail, ret =%d\n",
9748                         status);
9749                 return status;
9750         }
9751
9752         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9753
9754         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9755         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9756         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9757
9758         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9759         if (status)
9760                 dev_err(&hdev->pdev->dev,
9761                         "Send txvlan protocol type command fail, ret =%d\n",
9762                         status);
9763
9764         return status;
9765 }
9766
9767 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9768 {
9769 #define HCLGE_DEF_VLAN_TYPE             0x8100
9770
9771         struct hnae3_handle *handle = &hdev->vport[0].nic;
9772         struct hclge_vport *vport;
9773         int ret;
9774         int i;
9775
9776         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9777                 /* for revision 0x21, vf vlan filter is per function */
9778                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9779                         vport = &hdev->vport[i];
9780                         ret = hclge_set_vlan_filter_ctrl(hdev,
9781                                                          HCLGE_FILTER_TYPE_VF,
9782                                                          HCLGE_FILTER_FE_EGRESS,
9783                                                          true,
9784                                                          vport->vport_id);
9785                         if (ret)
9786                                 return ret;
9787                 }
9788
9789                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9790                                                  HCLGE_FILTER_FE_INGRESS, true,
9791                                                  0);
9792                 if (ret)
9793                         return ret;
9794         } else {
9795                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9796                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9797                                                  true, 0);
9798                 if (ret)
9799                         return ret;
9800         }
9801
9802         handle->netdev_flags |= HNAE3_VLAN_FLTR;
9803
9804         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9805         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9806         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9807         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9808         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9809         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9810
9811         ret = hclge_set_vlan_protocol_type(hdev);
9812         if (ret)
9813                 return ret;
9814
9815         for (i = 0; i < hdev->num_alloc_vport; i++) {
9816                 u16 vlan_tag;
9817
9818                 vport = &hdev->vport[i];
9819                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9820
9821                 ret = hclge_vlan_offload_cfg(vport,
9822                                              vport->port_base_vlan_cfg.state,
9823                                              vlan_tag);
9824                 if (ret)
9825                         return ret;
9826         }
9827
9828         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9829 }
9830
9831 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9832                                        bool writen_to_tbl)
9833 {
9834         struct hclge_vport_vlan_cfg *vlan;
9835
9836         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9837         if (!vlan)
9838                 return;
9839
9840         vlan->hd_tbl_status = writen_to_tbl;
9841         vlan->vlan_id = vlan_id;
9842
9843         list_add_tail(&vlan->node, &vport->vlan_list);
9844 }
9845
9846 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9847 {
9848         struct hclge_vport_vlan_cfg *vlan, *tmp;
9849         struct hclge_dev *hdev = vport->back;
9850         int ret;
9851
9852         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9853                 if (!vlan->hd_tbl_status) {
9854                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9855                                                        vport->vport_id,
9856                                                        vlan->vlan_id, false);
9857                         if (ret) {
9858                                 dev_err(&hdev->pdev->dev,
9859                                         "restore vport vlan list failed, ret=%d\n",
9860                                         ret);
9861                                 return ret;
9862                         }
9863                 }
9864                 vlan->hd_tbl_status = true;
9865         }
9866
9867         return 0;
9868 }
9869
9870 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9871                                       bool is_write_tbl)
9872 {
9873         struct hclge_vport_vlan_cfg *vlan, *tmp;
9874         struct hclge_dev *hdev = vport->back;
9875
9876         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9877                 if (vlan->vlan_id == vlan_id) {
9878                         if (is_write_tbl && vlan->hd_tbl_status)
9879                                 hclge_set_vlan_filter_hw(hdev,
9880                                                          htons(ETH_P_8021Q),
9881                                                          vport->vport_id,
9882                                                          vlan_id,
9883                                                          true);
9884
9885                         list_del(&vlan->node);
9886                         kfree(vlan);
9887                         break;
9888                 }
9889         }
9890 }
9891
9892 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9893 {
9894         struct hclge_vport_vlan_cfg *vlan, *tmp;
9895         struct hclge_dev *hdev = vport->back;
9896
9897         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9898                 if (vlan->hd_tbl_status)
9899                         hclge_set_vlan_filter_hw(hdev,
9900                                                  htons(ETH_P_8021Q),
9901                                                  vport->vport_id,
9902                                                  vlan->vlan_id,
9903                                                  true);
9904
9905                 vlan->hd_tbl_status = false;
9906                 if (is_del_list) {
9907                         list_del(&vlan->node);
9908                         kfree(vlan);
9909                 }
9910         }
9911         clear_bit(vport->vport_id, hdev->vf_vlan_full);
9912 }
9913
9914 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9915 {
9916         struct hclge_vport_vlan_cfg *vlan, *tmp;
9917         struct hclge_vport *vport;
9918         int i;
9919
9920         for (i = 0; i < hdev->num_alloc_vport; i++) {
9921                 vport = &hdev->vport[i];
9922                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9923                         list_del(&vlan->node);
9924                         kfree(vlan);
9925                 }
9926         }
9927 }
9928
9929 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9930 {
9931         struct hclge_vport_vlan_cfg *vlan, *tmp;
9932         struct hclge_dev *hdev = vport->back;
9933         u16 vlan_proto;
9934         u16 vlan_id;
9935         u16 state;
9936         int ret;
9937
9938         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9939         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9940         state = vport->port_base_vlan_cfg.state;
9941
9942         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9943                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9944                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9945                                          vport->vport_id, vlan_id,
9946                                          false);
9947                 return;
9948         }
9949
9950         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9951                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9952                                                vport->vport_id,
9953                                                vlan->vlan_id, false);
9954                 if (ret)
9955                         break;
9956                 vlan->hd_tbl_status = true;
9957         }
9958 }
9959
9960 /* For global reset and imp reset, hardware will clear the mac table,
9961  * so we change the mac address state from ACTIVE to TO_ADD, then they
9962  * can be restored in the service task after the reset completes. Furthermore,
9963  * the mac addresses with state TO_DEL or DEL_FAIL do not need to
9964  * be restored after reset, so just remove these mac nodes from mac_list.
9965  */
9966 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9967 {
9968         struct hclge_mac_node *mac_node, *tmp;
9969
9970         list_for_each_entry_safe(mac_node, tmp, list, node) {
9971                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9972                         mac_node->state = HCLGE_MAC_TO_ADD;
9973                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9974                         list_del(&mac_node->node);
9975                         kfree(mac_node);
9976                 }
9977         }
9978 }
9979
9980 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9981 {
9982         spin_lock_bh(&vport->mac_list_lock);
9983
9984         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9985         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9986         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9987
9988         spin_unlock_bh(&vport->mac_list_lock);
9989 }
9990
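/* Restore the hardware tables of the PF's own vport after a reset: re-add the
 * MAC and VLAN entries kept in the software lists, restore the flow director
 * entries, and mark promisc mode and the user-defined flow director fields as
 * changed so they are re-applied later.
 */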
9991 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9992 {
9993         struct hclge_vport *vport = &hdev->vport[0];
9994         struct hnae3_handle *handle = &vport->nic;
9995
9996         hclge_restore_mac_table_common(vport);
9997         hclge_restore_vport_vlan_table(vport);
9998         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9999         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10000         hclge_restore_fd_entries(handle);
10001 }
10002
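/* Toggle hardware RX VLAN tag stripping for a vport, typically in response to
 * the user switching the rx vlan offload feature (e.g. "ethtool -K <dev>
 * rxvlan on|off"). Which tag gets stripped depends on whether a port based
 * VLAN is currently configured.
 */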
10003 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10004 {
10005         struct hclge_vport *vport = hclge_get_vport(handle);
10006
10007         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10008                 vport->rxvlan_cfg.strip_tag1_en = false;
10009                 vport->rxvlan_cfg.strip_tag2_en = enable;
10010                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10011         } else {
10012                 vport->rxvlan_cfg.strip_tag1_en = enable;
10013                 vport->rxvlan_cfg.strip_tag2_en = true;
10014                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10015         }
10016
10017         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10018         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10019         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10020         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10021
10022         return hclge_set_vlan_rx_offload_cfg(vport);
10023 }
10024
10025 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10026                                             u16 port_base_vlan_state,
10027                                             struct hclge_vlan_info *new_info,
10028                                             struct hclge_vlan_info *old_info)
10029 {
10030         struct hclge_dev *hdev = vport->back;
10031         int ret;
10032
10033         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10034                 hclge_rm_vport_all_vlan_table(vport, false);
10035                 return hclge_set_vlan_filter_hw(hdev,
10036                                                  htons(new_info->vlan_proto),
10037                                                  vport->vport_id,
10038                                                  new_info->vlan_tag,
10039                                                  false);
10040         }
10041
10042         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10043                                        vport->vport_id, old_info->vlan_tag,
10044                                        true);
10045         if (ret)
10046                 return ret;
10047
10048         return hclge_add_vport_all_vlan_table(vport);
10049 }
10050
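/* Apply a new port based VLAN configuration to a vport. For a MODIFY request
 * the new tag is written to the VLAN filter before the old one is removed and
 * the enable/disable state is left untouched; for ENABLE/DISABLE requests the
 * filter entries are exchanged between the port based VLAN and the vport's
 * own VLAN list and the state is updated.
 */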
10051 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10052                                     struct hclge_vlan_info *vlan_info)
10053 {
10054         struct hnae3_handle *nic = &vport->nic;
10055         struct hclge_vlan_info *old_vlan_info;
10056         struct hclge_dev *hdev = vport->back;
10057         int ret;
10058
10059         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10060
10061         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
10062         if (ret)
10063                 return ret;
10064
10065         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10066                 /* add new VLAN tag */
10067                 ret = hclge_set_vlan_filter_hw(hdev,
10068                                                htons(vlan_info->vlan_proto),
10069                                                vport->vport_id,
10070                                                vlan_info->vlan_tag,
10071                                                false);
10072                 if (ret)
10073                         return ret;
10074
10075                 /* remove old VLAN tag */
10076                 ret = hclge_set_vlan_filter_hw(hdev,
10077                                                htons(old_vlan_info->vlan_proto),
10078                                                vport->vport_id,
10079                                                old_vlan_info->vlan_tag,
10080                                                true);
10081                 if (ret)
10082                         return ret;
10083
10084                 goto update;
10085         }
10086
10087         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10088                                                old_vlan_info);
10089         if (ret)
10090                 return ret;
10091
10092         /* update state only when disabling/enabling port based VLAN */
10093         vport->port_base_vlan_cfg.state = state;
10094         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10095                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10096         else
10097                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10098
10099 update:
10100         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
10101         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
10102         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
10103
10104         return 0;
10105 }
10106
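/* Map a requested VLAN tag onto a port based VLAN state transition:
 *
 *   current state   requested vlan        result
 *   DISABLE         0                     NOCHANGE
 *   DISABLE         non-zero              ENABLE
 *   ENABLE          0                     DISABLE
 *   ENABLE          same tag as current   NOCHANGE
 *   ENABLE          a different tag       MODIFY
 */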
10107 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10108                                           enum hnae3_port_base_vlan_state state,
10109                                           u16 vlan)
10110 {
10111         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10112                 if (!vlan)
10113                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10114                 else
10115                         return HNAE3_PORT_BASE_VLAN_ENABLE;
10116         } else {
10117                 if (!vlan)
10118                         return HNAE3_PORT_BASE_VLAN_DISABLE;
10119                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
10120                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10121                 else
10122                         return HNAE3_PORT_BASE_VLAN_MODIFY;
10123         }
10124 }
10125
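/* Set the port based VLAN of a VF on behalf of the host administrator,
 * typically reached through the VF VLAN configuration path (for example
 * "ip link set <pf> vf <n> vlan <vid> qos <qos>"). Only 802.1Q is accepted,
 * and devices older than version V2 are not supported.
 */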
10126 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10127                                     u16 vlan, u8 qos, __be16 proto)
10128 {
10129         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10130         struct hclge_vport *vport = hclge_get_vport(handle);
10131         struct hclge_dev *hdev = vport->back;
10132         struct hclge_vlan_info vlan_info;
10133         u16 state;
10134         int ret;
10135
10136         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10137                 return -EOPNOTSUPP;
10138
10139         vport = hclge_get_vf_vport(hdev, vfid);
10140         if (!vport)
10141                 return -EINVAL;
10142
10143         /* qos is a 3-bit value, so it cannot be bigger than 7 */
10144         if (vlan > VLAN_N_VID - 1 || qos > 7)
10145                 return -EINVAL;
10146         if (proto != htons(ETH_P_8021Q))
10147                 return -EPROTONOSUPPORT;
10148
10149         state = hclge_get_port_base_vlan_state(vport,
10150                                                vport->port_base_vlan_cfg.state,
10151                                                vlan);
10152         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10153                 return 0;
10154
10155         vlan_info.vlan_tag = vlan;
10156         vlan_info.qos = qos;
10157         vlan_info.vlan_proto = ntohs(proto);
10158
10159         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10160         if (ret) {
10161                 dev_err(&hdev->pdev->dev,
10162                         "failed to update port base vlan for vf %d, ret = %d\n",
10163                         vfid, ret);
10164                 return ret;
10165         }
10166
10167         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10168          * VLAN state.
10169          */
10170         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10171             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10172                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10173                                                   vport->vport_id, state,
10174                                                   vlan, qos,
10175                                                   ntohs(proto));
10176
10177         return 0;
10178 }
10179
10180 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10181 {
10182         struct hclge_vlan_info *vlan_info;
10183         struct hclge_vport *vport;
10184         int ret;
10185         int vf;
10186
10187         /* clear the port based vlan for all vfs */
10188         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10189                 vport = &hdev->vport[vf];
10190                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10191
10192                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10193                                                vport->vport_id,
10194                                                vlan_info->vlan_tag, true);
10195                 if (ret)
10196                         dev_err(&hdev->pdev->dev,
10197                                 "failed to clear vf vlan for vf%d, ret = %d\n",
10198                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10199         }
10200 }
10201
10202 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10203                           u16 vlan_id, bool is_kill)
10204 {
10205         struct hclge_vport *vport = hclge_get_vport(handle);
10206         struct hclge_dev *hdev = vport->back;
10207         bool writen_to_tbl = false;
10208         int ret = 0;
10209
10210         /* When the device is resetting or the reset has failed, the firmware
10211          * is unable to handle the mailbox. Just record the vlan id, and remove
10212          * it after the reset has finished.
10213          */
10214         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10215              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10216                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10217                 return -EBUSY;
10218         }
10219
10220         /* when port based vlan is enabled, we use the port based vlan as the
10221          * vlan filter entry. In this case, we don't update the vlan filter table
10222          * when the user adds a new vlan or removes an existing vlan, we just
10223          * update the vport vlan list. The vlan ids in the vlan list will not be
10224          * written to the vlan filter table until port based vlan is disabled.
10225          */
10226         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10227                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10228                                                vlan_id, is_kill);
10229                 writen_to_tbl = true;
10230         }
10231
10232         if (!ret) {
10233                 if (is_kill)
10234                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10235                 else
10236                         hclge_add_vport_vlan_table(vport, vlan_id,
10237                                                    writen_to_tbl);
10238         } else if (is_kill) {
10239                 /* when removing the hw vlan filter failed, record the vlan id,
10240                  * and try to remove it from hw later, to stay consistent
10241                  * with the stack
10242                  */
10243                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10244         }
10245         return ret;
10246 }
10247
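/* Periodically retry the VLAN deletions that could not be applied to hardware
 * (recorded in each vport's vlan_del_fail_bmap), handling at most
 * HCLGE_MAX_SYNC_COUNT entries per invocation.
 */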
10248 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10249 {
10250 #define HCLGE_MAX_SYNC_COUNT    60
10251
10252         int i, ret, sync_cnt = 0;
10253         u16 vlan_id;
10254
10255         /* walk every vport, including the PF (vport 0) */
10256         for (i = 0; i < hdev->num_alloc_vport; i++) {
10257                 struct hclge_vport *vport = &hdev->vport[i];
10258
10259                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10260                                          VLAN_N_VID);
10261                 while (vlan_id != VLAN_N_VID) {
10262                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10263                                                        vport->vport_id, vlan_id,
10264                                                        true);
10265                         if (ret && ret != -EINVAL)
10266                                 return;
10267
10268                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10269                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10270
10271                         sync_cnt++;
10272                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10273                                 return;
10274
10275                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10276                                                  VLAN_N_VID);
10277                 }
10278         }
10279 }
10280
10281 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10282 {
10283         struct hclge_config_max_frm_size_cmd *req;
10284         struct hclge_desc desc;
10285
10286         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10287
10288         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10289         req->max_frm_size = cpu_to_le16(new_mps);
10290         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10291
10292         return hclge_cmd_send(&hdev->hw, &desc, 1);
10293 }
10294
10295 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10296 {
10297         struct hclge_vport *vport = hclge_get_vport(handle);
10298
10299         return hclge_set_vport_mtu(vport, new_mtu);
10300 }
10301
10302 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10303 {
10304         struct hclge_dev *hdev = vport->back;
10305         int i, max_frm_size, ret;
10306
10307         /* HW supports 2 layers of vlan */
10308         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
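        /* e.g. a 1500 byte MTU gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
         * 2 * 4 (VLAN_HLEN) = 1526 bytes as the frame size checked below
         */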
10309         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10310             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10311                 return -EINVAL;
10312
10313         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10314         mutex_lock(&hdev->vport_lock);
10315         /* VF's mps must fit within hdev->mps */
10316         if (vport->vport_id && max_frm_size > hdev->mps) {
10317                 mutex_unlock(&hdev->vport_lock);
10318                 return -EINVAL;
10319         } else if (vport->vport_id) {
10320                 vport->mps = max_frm_size;
10321                 mutex_unlock(&hdev->vport_lock);
10322                 return 0;
10323         }
10324
10325         /* PF's mps must not be less than any VF's mps */
10326         for (i = 1; i < hdev->num_alloc_vport; i++)
10327                 if (max_frm_size < hdev->vport[i].mps) {
10328                         mutex_unlock(&hdev->vport_lock);
10329                         return -EINVAL;
10330                 }
10331
10332         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10333
10334         ret = hclge_set_mac_mtu(hdev, max_frm_size);
10335         if (ret) {
10336                 dev_err(&hdev->pdev->dev,
10337                         "Change mtu fail, ret =%d\n", ret);
10338                 goto out;
10339         }
10340
10341         hdev->mps = max_frm_size;
10342         vport->mps = max_frm_size;
10343
10344         ret = hclge_buffer_alloc(hdev);
10345         if (ret)
10346                 dev_err(&hdev->pdev->dev,
10347                         "Allocate buffer fail, ret =%d\n", ret);
10348
10349 out:
10350         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10351         mutex_unlock(&hdev->vport_lock);
10352         return ret;
10353 }
10354
10355 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10356                                     bool enable)
10357 {
10358         struct hclge_reset_tqp_queue_cmd *req;
10359         struct hclge_desc desc;
10360         int ret;
10361
10362         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10363
10364         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10365         req->tqp_id = cpu_to_le16(queue_id);
10366         if (enable)
10367                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10368
10369         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10370         if (ret) {
10371                 dev_err(&hdev->pdev->dev,
10372                         "Send tqp reset cmd error, status =%d\n", ret);
10373                 return ret;
10374         }
10375
10376         return 0;
10377 }
10378
10379 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10380 {
10381         struct hclge_reset_tqp_queue_cmd *req;
10382         struct hclge_desc desc;
10383         int ret;
10384
10385         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10386
10387         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10388         req->tqp_id = cpu_to_le16(queue_id);
10389
10390         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10391         if (ret) {
10392                 dev_err(&hdev->pdev->dev,
10393                         "Get reset status error, status =%d\n", ret);
10394                 return ret;
10395         }
10396
10397         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10398 }
10399
10400 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10401 {
10402         struct hnae3_queue *queue;
10403         struct hclge_tqp *tqp;
10404
10405         queue = handle->kinfo.tqp[queue_id];
10406         tqp = container_of(queue, struct hclge_tqp, q);
10407
10408         return tqp->index;
10409 }
10410
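/* Reset every TQP owned by the handle one at a time: assert the per-queue
 * reset, poll the ready bit (sleeping roughly 1ms between reads) until
 * hardware reports the reset is done or the retry limit is hit, then
 * de-assert the reset before moving on to the next queue.
 */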
10411 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10412 {
10413         struct hclge_vport *vport = hclge_get_vport(handle);
10414         struct hclge_dev *hdev = vport->back;
10415         u16 reset_try_times = 0;
10416         int reset_status;
10417         u16 queue_gid;
10418         int ret;
10419         u16 i;
10420
10421         for (i = 0; i < handle->kinfo.num_tqps; i++) {
10422                 queue_gid = hclge_covert_handle_qid_global(handle, i);
10423                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10424                 if (ret) {
10425                         dev_err(&hdev->pdev->dev,
10426                                 "failed to send reset tqp cmd, ret = %d\n",
10427                                 ret);
10428                         return ret;
10429                 }
10430
10431                 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10432                         reset_status = hclge_get_reset_status(hdev, queue_gid);
10433                         if (reset_status)
10434                                 break;
10435
10436                         /* Wait for tqp hw reset */
10437                         usleep_range(1000, 1200);
10438                 }
10439
10440                 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10441                         dev_err(&hdev->pdev->dev,
10442                                 "wait for tqp hw reset timeout\n");
10443                         return -ETIME;
10444                 }
10445
10446                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10447                 if (ret) {
10448                         dev_err(&hdev->pdev->dev,
10449                                 "failed to deassert soft reset, ret = %d\n",
10450                                 ret);
10451                         return ret;
10452                 }
10453                 reset_try_times = 0;
10454         }
10455         return 0;
10456 }
10457
10458 static int hclge_reset_rcb(struct hnae3_handle *handle)
10459 {
10460 #define HCLGE_RESET_RCB_NOT_SUPPORT     0U
10461 #define HCLGE_RESET_RCB_SUCCESS         1U
10462
10463         struct hclge_vport *vport = hclge_get_vport(handle);
10464         struct hclge_dev *hdev = vport->back;
10465         struct hclge_reset_cmd *req;
10466         struct hclge_desc desc;
10467         u8 return_status;
10468         u16 queue_gid;
10469         int ret;
10470
10471         queue_gid = hclge_covert_handle_qid_global(handle, 0);
10472
10473         req = (struct hclge_reset_cmd *)desc.data;
10474         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10475         hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10476         req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10477         req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10478
10479         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10480         if (ret) {
10481                 dev_err(&hdev->pdev->dev,
10482                         "failed to send rcb reset cmd, ret = %d\n", ret);
10483                 return ret;
10484         }
10485
10486         return_status = req->fun_reset_rcb_return_status;
10487         if (return_status == HCLGE_RESET_RCB_SUCCESS)
10488                 return 0;
10489
10490         if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10491                 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10492                         return_status);
10493                 return -EIO;
10494         }
10495
10496         /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10497          * again to reset all tqps
10498          */
10499         return hclge_reset_tqp_cmd(handle);
10500 }
10501
10502 int hclge_reset_tqp(struct hnae3_handle *handle)
10503 {
10504         struct hclge_vport *vport = hclge_get_vport(handle);
10505         struct hclge_dev *hdev = vport->back;
10506         int ret;
10507
10508         /* only need to disable PF's tqp */
10509         if (!vport->vport_id) {
10510                 ret = hclge_tqp_enable(handle, false);
10511                 if (ret) {
10512                         dev_err(&hdev->pdev->dev,
10513                                 "failed to disable tqp, ret = %d\n", ret);
10514                         return ret;
10515                 }
10516         }
10517
10518         return hclge_reset_rcb(handle);
10519 }
10520
10521 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10522 {
10523         struct hclge_vport *vport = hclge_get_vport(handle);
10524         struct hclge_dev *hdev = vport->back;
10525
10526         return hdev->fw_version;
10527 }
10528
10529 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10530 {
10531         struct phy_device *phydev = hdev->hw.mac.phydev;
10532
10533         if (!phydev)
10534                 return;
10535
10536         phy_set_asym_pause(phydev, rx_en, tx_en);
10537 }
10538
10539 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10540 {
10541         int ret;
10542
10543         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10544                 return 0;
10545
10546         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10547         if (ret)
10548                 dev_err(&hdev->pdev->dev,
10549                         "configure pauseparam error, ret = %d.\n", ret);
10550
10551         return ret;
10552 }
10553
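/* Re-resolve MAC pause settings after PHY autonegotiation: combine the local
 * and link-partner pause advertisements (e.g. both sides advertising
 * symmetric pause resolves to pause enabled in both directions), program the
 * result, and force pause off on half duplex links.
 */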
10554 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10555 {
10556         struct phy_device *phydev = hdev->hw.mac.phydev;
10557         u16 remote_advertising = 0;
10558         u16 local_advertising;
10559         u32 rx_pause, tx_pause;
10560         u8 flowctl;
10561
10562         if (!phydev->link || !phydev->autoneg)
10563                 return 0;
10564
10565         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10566
10567         if (phydev->pause)
10568                 remote_advertising = LPA_PAUSE_CAP;
10569
10570         if (phydev->asym_pause)
10571                 remote_advertising |= LPA_PAUSE_ASYM;
10572
10573         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10574                                            remote_advertising);
10575         tx_pause = flowctl & FLOW_CTRL_TX;
10576         rx_pause = flowctl & FLOW_CTRL_RX;
10577
10578         if (phydev->duplex == HCLGE_MAC_HALF) {
10579                 tx_pause = 0;
10580                 rx_pause = 0;
10581         }
10582
10583         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10584 }
10585
10586 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10587                                  u32 *rx_en, u32 *tx_en)
10588 {
10589         struct hclge_vport *vport = hclge_get_vport(handle);
10590         struct hclge_dev *hdev = vport->back;
10591         u8 media_type = hdev->hw.mac.media_type;
10592
10593         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10594                     hclge_get_autoneg(handle) : 0;
10595
10596         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10597                 *rx_en = 0;
10598                 *tx_en = 0;
10599                 return;
10600         }
10601
10602         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10603                 *rx_en = 1;
10604                 *tx_en = 0;
10605         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10606                 *tx_en = 1;
10607                 *rx_en = 0;
10608         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10609                 *rx_en = 1;
10610                 *tx_en = 1;
10611         } else {
10612                 *rx_en = 0;
10613                 *tx_en = 0;
10614         }
10615 }
10616
10617 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10618                                          u32 rx_en, u32 tx_en)
10619 {
10620         if (rx_en && tx_en)
10621                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10622         else if (rx_en && !tx_en)
10623                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10624         else if (!rx_en && tx_en)
10625                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10626         else
10627                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10628
10629         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10630 }
10631
10632 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10633                                 u32 rx_en, u32 tx_en)
10634 {
10635         struct hclge_vport *vport = hclge_get_vport(handle);
10636         struct hclge_dev *hdev = vport->back;
10637         struct phy_device *phydev = hdev->hw.mac.phydev;
10638         u32 fc_autoneg;
10639
10640         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10641                 fc_autoneg = hclge_get_autoneg(handle);
10642                 if (auto_neg != fc_autoneg) {
10643                         dev_info(&hdev->pdev->dev,
10644                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10645                         return -EOPNOTSUPP;
10646                 }
10647         }
10648
10649         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10650                 dev_info(&hdev->pdev->dev,
10651                          "Priority flow control enabled. Cannot set link flow control.\n");
10652                 return -EOPNOTSUPP;
10653         }
10654
10655         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10656
10657         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10658
10659         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10660                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10661
10662         if (phydev)
10663                 return phy_start_aneg(phydev);
10664
10665         return -EOPNOTSUPP;
10666 }
10667
10668 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10669                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10670 {
10671         struct hclge_vport *vport = hclge_get_vport(handle);
10672         struct hclge_dev *hdev = vport->back;
10673
10674         if (speed)
10675                 *speed = hdev->hw.mac.speed;
10676         if (duplex)
10677                 *duplex = hdev->hw.mac.duplex;
10678         if (auto_neg)
10679                 *auto_neg = hdev->hw.mac.autoneg;
10680 }
10681
10682 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10683                                  u8 *module_type)
10684 {
10685         struct hclge_vport *vport = hclge_get_vport(handle);
10686         struct hclge_dev *hdev = vport->back;
10687
10688         /* When the nic is down, the service task is not running and doesn't
10689          * update the port information every second. Query the port information
10690          * before returning the media type to ensure it is correct.
10691          */
10692         hclge_update_port_info(hdev);
10693
10694         if (media_type)
10695                 *media_type = hdev->hw.mac.media_type;
10696
10697         if (module_type)
10698                 *module_type = hdev->hw.mac.module_type;
10699 }
10700
10701 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10702                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10703 {
10704         struct hclge_vport *vport = hclge_get_vport(handle);
10705         struct hclge_dev *hdev = vport->back;
10706         struct phy_device *phydev = hdev->hw.mac.phydev;
10707         int mdix_ctrl, mdix, is_resolved;
10708         unsigned int retval;
10709
10710         if (!phydev) {
10711                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10712                 *tp_mdix = ETH_TP_MDI_INVALID;
10713                 return;
10714         }
10715
10716         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10717
10718         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10719         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10720                                     HCLGE_PHY_MDIX_CTRL_S);
10721
10722         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10723         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10724         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10725
10726         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10727
10728         switch (mdix_ctrl) {
10729         case 0x0:
10730                 *tp_mdix_ctrl = ETH_TP_MDI;
10731                 break;
10732         case 0x1:
10733                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10734                 break;
10735         case 0x3:
10736                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10737                 break;
10738         default:
10739                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10740                 break;
10741         }
10742
10743         if (!is_resolved)
10744                 *tp_mdix = ETH_TP_MDI_INVALID;
10745         else if (mdix)
10746                 *tp_mdix = ETH_TP_MDI_X;
10747         else
10748                 *tp_mdix = ETH_TP_MDI;
10749 }
10750
10751 static void hclge_info_show(struct hclge_dev *hdev)
10752 {
10753         struct device *dev = &hdev->pdev->dev;
10754
10755         dev_info(dev, "PF info begin:\n");
10756
10757         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10758         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10759         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10760         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10761         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10762         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10763         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10764         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10765         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10766         dev_info(dev, "This is %s PF\n",
10767                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10768         dev_info(dev, "DCB %s\n",
10769                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10770         dev_info(dev, "MQPRIO %s\n",
10771                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10772
10773         dev_info(dev, "PF info end.\n");
10774 }
10775
10776 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10777                                           struct hclge_vport *vport)
10778 {
10779         struct hnae3_client *client = vport->nic.client;
10780         struct hclge_dev *hdev = ae_dev->priv;
10781         int rst_cnt = hdev->rst_stats.reset_cnt;
10782         int ret;
10783
10784         ret = client->ops->init_instance(&vport->nic);
10785         if (ret)
10786                 return ret;
10787
10788         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10789         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10790             rst_cnt != hdev->rst_stats.reset_cnt) {
10791                 ret = -EBUSY;
10792                 goto init_nic_err;
10793         }
10794
10795         /* Enable nic hw error interrupts */
10796         ret = hclge_config_nic_hw_error(hdev, true);
10797         if (ret) {
10798                 dev_err(&ae_dev->pdev->dev,
10799                         "fail(%d) to enable hw error interrupts\n", ret);
10800                 goto init_nic_err;
10801         }
10802
10803         hnae3_set_client_init_flag(client, ae_dev, 1);
10804
10805         if (netif_msg_drv(&hdev->vport->nic))
10806                 hclge_info_show(hdev);
10807
10808         return ret;
10809
10810 init_nic_err:
10811         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10812         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10813                 msleep(HCLGE_WAIT_RESET_DONE);
10814
10815         client->ops->uninit_instance(&vport->nic, 0);
10816
10817         return ret;
10818 }
10819
10820 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10821                                            struct hclge_vport *vport)
10822 {
10823         struct hclge_dev *hdev = ae_dev->priv;
10824         struct hnae3_client *client;
10825         int rst_cnt;
10826         int ret;
10827
10828         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10829             !hdev->nic_client)
10830                 return 0;
10831
10832         client = hdev->roce_client;
10833         ret = hclge_init_roce_base_info(vport);
10834         if (ret)
10835                 return ret;
10836
10837         rst_cnt = hdev->rst_stats.reset_cnt;
10838         ret = client->ops->init_instance(&vport->roce);
10839         if (ret)
10840                 return ret;
10841
10842         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10843         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10844             rst_cnt != hdev->rst_stats.reset_cnt) {
10845                 ret = -EBUSY;
10846                 goto init_roce_err;
10847         }
10848
10849         /* Enable roce ras interrupts */
10850         ret = hclge_config_rocee_ras_interrupt(hdev, true);
10851         if (ret) {
10852                 dev_err(&ae_dev->pdev->dev,
10853                         "fail(%d) to enable roce ras interrupts\n", ret);
10854                 goto init_roce_err;
10855         }
10856
10857         hnae3_set_client_init_flag(client, ae_dev, 1);
10858
10859         return 0;
10860
10861 init_roce_err:
10862         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10863         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10864                 msleep(HCLGE_WAIT_RESET_DONE);
10865
10866         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10867
10868         return ret;
10869 }
10870
10871 static int hclge_init_client_instance(struct hnae3_client *client,
10872                                       struct hnae3_ae_dev *ae_dev)
10873 {
10874         struct hclge_dev *hdev = ae_dev->priv;
10875         struct hclge_vport *vport = &hdev->vport[0];
10876         int ret;
10877
10878         switch (client->type) {
10879         case HNAE3_CLIENT_KNIC:
10880                 hdev->nic_client = client;
10881                 vport->nic.client = client;
10882                 ret = hclge_init_nic_client_instance(ae_dev, vport);
10883                 if (ret)
10884                         goto clear_nic;
10885
10886                 ret = hclge_init_roce_client_instance(ae_dev, vport);
10887                 if (ret)
10888                         goto clear_roce;
10889
10890                 break;
10891         case HNAE3_CLIENT_ROCE:
10892                 if (hnae3_dev_roce_supported(hdev)) {
10893                         hdev->roce_client = client;
10894                         vport->roce.client = client;
10895                 }
10896
10897                 ret = hclge_init_roce_client_instance(ae_dev, vport);
10898                 if (ret)
10899                         goto clear_roce;
10900
10901                 break;
10902         default:
10903                 return -EINVAL;
10904         }
10905
10906         return 0;
10907
10908 clear_nic:
10909         hdev->nic_client = NULL;
10910         vport->nic.client = NULL;
10911         return ret;
10912 clear_roce:
10913         hdev->roce_client = NULL;
10914         vport->roce.client = NULL;
10915         return ret;
10916 }
10917
10918 static void hclge_uninit_client_instance(struct hnae3_client *client,
10919                                          struct hnae3_ae_dev *ae_dev)
10920 {
10921         struct hclge_dev *hdev = ae_dev->priv;
10922         struct hclge_vport *vport = &hdev->vport[0];
10923
10924         if (hdev->roce_client) {
10925                 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10926                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10927                         msleep(HCLGE_WAIT_RESET_DONE);
10928
10929                 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10930                 hdev->roce_client = NULL;
10931                 vport->roce.client = NULL;
10932         }
10933         if (client->type == HNAE3_CLIENT_ROCE)
10934                 return;
10935         if (hdev->nic_client && client->ops->uninit_instance) {
10936                 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10937                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10938                         msleep(HCLGE_WAIT_RESET_DONE);
10939
10940                 client->ops->uninit_instance(&vport->nic, 0);
10941                 hdev->nic_client = NULL;
10942                 vport->nic.client = NULL;
10943         }
10944 }
10945
10946 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10947 {
10948 #define HCLGE_MEM_BAR           4
10949
10950         struct pci_dev *pdev = hdev->pdev;
10951         struct hclge_hw *hw = &hdev->hw;
10952
10953         /* if the device does not have device memory, return directly */
10954         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10955                 return 0;
10956
10957         hw->mem_base = devm_ioremap_wc(&pdev->dev,
10958                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
10959                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
10960         if (!hw->mem_base) {
10961                 dev_err(&pdev->dev, "failed to map device memory\n");
10962                 return -EFAULT;
10963         }
10964
10965         return 0;
10966 }
10967
10968 static int hclge_pci_init(struct hclge_dev *hdev)
10969 {
10970         struct pci_dev *pdev = hdev->pdev;
10971         struct hclge_hw *hw;
10972         int ret;
10973
10974         ret = pci_enable_device(pdev);
10975         if (ret) {
10976                 dev_err(&pdev->dev, "failed to enable PCI device\n");
10977                 return ret;
10978         }
10979
10980         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10981         if (ret) {
10982                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10983                 if (ret) {
10984                         dev_err(&pdev->dev,
10985                                 "can't set consistent PCI DMA");
10986                         goto err_disable_device;
10987                 }
10988                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10989         }
10990
10991         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10992         if (ret) {
10993                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10994                 goto err_disable_device;
10995         }
10996
10997         pci_set_master(pdev);
10998         hw = &hdev->hw;
10999         hw->io_base = pcim_iomap(pdev, 2, 0);
11000         if (!hw->io_base) {
11001                 dev_err(&pdev->dev, "Can't map configuration register space\n");
11002                 ret = -ENOMEM;
11003                 goto err_clr_master;
11004         }
11005
11006         ret = hclge_dev_mem_map(hdev);
11007         if (ret)
11008                 goto err_unmap_io_base;
11009
11010         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11011
11012         return 0;
11013
11014 err_unmap_io_base:
11015         pcim_iounmap(pdev, hdev->hw.io_base);
11016 err_clr_master:
11017         pci_clear_master(pdev);
11018         pci_release_regions(pdev);
11019 err_disable_device:
11020         pci_disable_device(pdev);
11021
11022         return ret;
11023 }
11024
11025 static void hclge_pci_uninit(struct hclge_dev *hdev)
11026 {
11027         struct pci_dev *pdev = hdev->pdev;
11028
11029         if (hdev->hw.mem_base)
11030                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11031
11032         pcim_iounmap(pdev, hdev->hw.io_base);
11033         pci_free_irq_vectors(pdev);
11034         pci_clear_master(pdev);
11035         pci_release_mem_regions(pdev);
11036         pci_disable_device(pdev);
11037 }
11038
11039 static void hclge_state_init(struct hclge_dev *hdev)
11040 {
11041         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11042         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11043         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11044         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11045         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11046         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11047         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11048 }
11049
11050 static void hclge_state_uninit(struct hclge_dev *hdev)
11051 {
11052         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11053         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11054
11055         if (hdev->reset_timer.function)
11056                 del_timer_sync(&hdev->reset_timer);
11057         if (hdev->service_task.work.func)
11058                 cancel_delayed_work_sync(&hdev->service_task);
11059 }
11060
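/* Prepare the function for an FLR: take the reset semaphore and run the
 * common reset preparation; if that fails, release the semaphore and retry
 * after HCLGE_FLR_RETRY_WAIT_MS while a reset is still pending or the retry
 * budget is not exhausted, then mask the misc vector and mark the command
 * queue disabled until the FLR completes.
 */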
11061 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
11062 {
11063 #define HCLGE_FLR_RETRY_WAIT_MS 500
11064 #define HCLGE_FLR_RETRY_CNT     5
11065
11066         struct hclge_dev *hdev = ae_dev->priv;
11067         int retry_cnt = 0;
11068         int ret;
11069
11070 retry:
11071         down(&hdev->reset_sem);
11072         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11073         hdev->reset_type = HNAE3_FLR_RESET;
11074         ret = hclge_reset_prepare(hdev);
11075         if (ret || hdev->reset_pending) {
11076                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
11077                         ret);
11078                 if (hdev->reset_pending ||
11079                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
11080                         dev_err(&hdev->pdev->dev,
11081                                 "reset_pending:0x%lx, retry_cnt:%d\n",
11082                                 hdev->reset_pending, retry_cnt);
11083                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11084                         up(&hdev->reset_sem);
11085                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
11086                         goto retry;
11087                 }
11088         }
11089
11090         /* disable misc vector before FLR is done */
11091         hclge_enable_vector(&hdev->misc_vector, false);
11092         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11093         hdev->rst_stats.flr_rst_cnt++;
11094 }
11095
11096 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
11097 {
11098         struct hclge_dev *hdev = ae_dev->priv;
11099         int ret;
11100
11101         hclge_enable_vector(&hdev->misc_vector, true);
11102
11103         ret = hclge_reset_rebuild(hdev);
11104         if (ret)
11105                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11106
11107         hdev->reset_type = HNAE3_NONE_RESET;
11108         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11109         up(&hdev->reset_sem);
11110 }
11111
11112 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11113 {
11114         u16 i;
11115
11116         for (i = 0; i < hdev->num_alloc_vport; i++) {
11117                 struct hclge_vport *vport = &hdev->vport[i];
11118                 int ret;
11119
11120                 /* Send cmd to clear VF's FUNC_RST_ING */
11121                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11122                 if (ret)
11123                         dev_warn(&hdev->pdev->dev,
11124                                  "clear vf(%u) rst failed %d!\n",
11125                                  vport->vport_id, ret);
11126         }
11127 }
11128
11129 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11130 {
11131         struct pci_dev *pdev = ae_dev->pdev;
11132         struct hclge_dev *hdev;
11133         int ret;
11134
11135         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11136         if (!hdev)
11137                 return -ENOMEM;
11138
11139         hdev->pdev = pdev;
11140         hdev->ae_dev = ae_dev;
11141         hdev->reset_type = HNAE3_NONE_RESET;
11142         hdev->reset_level = HNAE3_FUNC_RESET;
11143         ae_dev->priv = hdev;
11144
11145         /* HW supports 2-layer VLAN */
11146         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11147
11148         mutex_init(&hdev->vport_lock);
11149         spin_lock_init(&hdev->fd_rule_lock);
11150         sema_init(&hdev->reset_sem, 1);
11151
11152         ret = hclge_pci_init(hdev);
11153         if (ret)
11154                 goto out;
11155
11156         /* Initialize the firmware command queue */
11157         ret = hclge_cmd_queue_init(hdev);
11158         if (ret)
11159                 goto err_pci_uninit;
11160
11161         /* Initialize firmware command handling */
11162         ret = hclge_cmd_init(hdev);
11163         if (ret)
11164                 goto err_cmd_uninit;
11165
11166         ret = hclge_get_cap(hdev);
11167         if (ret)
11168                 goto err_cmd_uninit;
11169
11170         ret = hclge_query_dev_specs(hdev);
11171         if (ret) {
11172                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11173                         ret);
11174                 goto err_cmd_uninit;
11175         }
11176
11177         ret = hclge_configure(hdev);
11178         if (ret) {
11179                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11180                 goto err_cmd_uninit;
11181         }
11182
11183         ret = hclge_init_msi(hdev);
11184         if (ret) {
11185                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11186                 goto err_cmd_uninit;
11187         }
11188
11189         ret = hclge_misc_irq_init(hdev);
11190         if (ret)
11191                 goto err_msi_uninit;
11192
11193         ret = hclge_alloc_tqps(hdev);
11194         if (ret) {
11195                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11196                 goto err_msi_irq_uninit;
11197         }
11198
11199         ret = hclge_alloc_vport(hdev);
11200         if (ret)
11201                 goto err_msi_irq_uninit;
11202
11203         ret = hclge_map_tqp(hdev);
11204         if (ret)
11205                 goto err_msi_irq_uninit;
11206
11207         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11208             !hnae3_dev_phy_imp_supported(hdev)) {
11209                 ret = hclge_mac_mdio_config(hdev);
11210                 if (ret)
11211                         goto err_msi_irq_uninit;
11212         }
11213
11214         ret = hclge_init_umv_space(hdev);
11215         if (ret)
11216                 goto err_mdiobus_unreg;
11217
11218         ret = hclge_mac_init(hdev);
11219         if (ret) {
11220                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11221                 goto err_mdiobus_unreg;
11222         }
11223
11224         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11225         if (ret) {
11226                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11227                 goto err_mdiobus_unreg;
11228         }
11229
11230         ret = hclge_config_gro(hdev, true);
11231         if (ret)
11232                 goto err_mdiobus_unreg;
11233
11234         ret = hclge_init_vlan_config(hdev);
11235         if (ret) {
11236                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11237                 goto err_mdiobus_unreg;
11238         }
11239
11240         ret = hclge_tm_schd_init(hdev);
11241         if (ret) {
11242                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11243                 goto err_mdiobus_unreg;
11244         }
11245
11246         ret = hclge_rss_init_cfg(hdev);
11247         if (ret) {
11248                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11249                 goto err_mdiobus_unreg;
11250         }
11251
11252         ret = hclge_rss_init_hw(hdev);
11253         if (ret) {
11254                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11255                 goto err_mdiobus_unreg;
11256         }
11257
11258         ret = init_mgr_tbl(hdev);
11259         if (ret) {
11260                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11261                 goto err_mdiobus_unreg;
11262         }
11263
11264         ret = hclge_init_fd_config(hdev);
11265         if (ret) {
11266                 dev_err(&pdev->dev,
11267                         "fd table init fail, ret=%d\n", ret);
11268                 goto err_mdiobus_unreg;
11269         }
11270
11271         INIT_KFIFO(hdev->mac_tnl_log);
11272
11273         hclge_dcb_ops_set(hdev);
11274
11275         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11276         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11277
11278         /* Set up affinity after the service timer setup because add_timer_on
11279          * is called in the affinity notify callback.
11280          */
11281         hclge_misc_affinity_setup(hdev);
11282
11283         hclge_clear_all_event_cause(hdev);
11284         hclge_clear_resetting_state(hdev);
11285
11286         /* Log and clear the hw errors that have already occurred */
11287         hclge_handle_all_hns_hw_errors(ae_dev);
11288
11289         /* Request a delayed reset for error recovery because an immediate
11290          * global reset on a PF may affect the pending initialization of other PFs.
11291          */
11292         if (ae_dev->hw_err_reset_req) {
11293                 enum hnae3_reset_type reset_level;
11294
11295                 reset_level = hclge_get_reset_level(ae_dev,
11296                                                     &ae_dev->hw_err_reset_req);
11297                 hclge_set_def_reset_request(ae_dev, reset_level);
11298                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11299         }
11300
11301         /* Enable MISC vector(vector0) */
11302         hclge_enable_vector(&hdev->misc_vector, true);
11303
11304         hclge_state_init(hdev);
11305         hdev->last_reset_time = jiffies;
11306
11307         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11308                  HCLGE_DRIVER_NAME);
11309
11310         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11311
11312         return 0;
11313
11314 err_mdiobus_unreg:
11315         if (hdev->hw.mac.phydev)
11316                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11317 err_msi_irq_uninit:
11318         hclge_misc_irq_uninit(hdev);
11319 err_msi_uninit:
11320         pci_free_irq_vectors(pdev);
11321 err_cmd_uninit:
11322         hclge_cmd_uninit(hdev);
11323 err_pci_uninit:
11324         pcim_iounmap(pdev, hdev->hw.io_base);
11325         pci_clear_master(pdev);
11326         pci_release_regions(pdev);
11327         pci_disable_device(pdev);
11328 out:
11329         mutex_destroy(&hdev->vport_lock);
11330         return ret;
11331 }
11332
11333 static void hclge_stats_clear(struct hclge_dev *hdev)
11334 {
11335         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11336 }
11337
11338 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11339 {
11340         return hclge_config_switch_param(hdev, vf, enable,
11341                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
11342 }
11343
11344 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11345 {
11346         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11347                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
11348                                           enable, vf);
11349 }
11350
11351 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11352 {
11353         int ret;
11354
11355         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11356         if (ret) {
11357                 dev_err(&hdev->pdev->dev,
11358                         "Set vf %d mac spoof check %s failed, ret=%d\n",
11359                         vf, enable ? "on" : "off", ret);
11360                 return ret;
11361         }
11362
11363         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11364         if (ret)
11365                 dev_err(&hdev->pdev->dev,
11366                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11367                         vf, enable ? "on" : "off", ret);
11368
11369         return ret;
11370 }
11371
11372 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11373                                  bool enable)
11374 {
11375         struct hclge_vport *vport = hclge_get_vport(handle);
11376         struct hclge_dev *hdev = vport->back;
11377         u32 new_spoofchk = enable ? 1 : 0;
11378         int ret;
11379
11380         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11381                 return -EOPNOTSUPP;
11382
11383         vport = hclge_get_vf_vport(hdev, vf);
11384         if (!vport)
11385                 return -EINVAL;
11386
11387         if (vport->vf_info.spoofchk == new_spoofchk)
11388                 return 0;
11389
11390         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11391                 dev_warn(&hdev->pdev->dev,
11392                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11393                          vf);
11394         else if (enable && hclge_is_umv_space_full(vport, true))
11395                 dev_warn(&hdev->pdev->dev,
11396                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11397                          vf);
11398
11399         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11400         if (ret)
11401                 return ret;
11402
11403         vport->vf_info.spoofchk = new_spoofchk;
11404         return 0;
11405 }
11406
11407 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11408 {
11409         struct hclge_vport *vport = hdev->vport;
11410         int ret;
11411         int i;
11412
11413         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11414                 return 0;
11415
11416         /* resume the vf spoof check state after reset */
11417         for (i = 0; i < hdev->num_alloc_vport; i++) {
11418                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11419                                                vport->vf_info.spoofchk);
11420                 if (ret)
11421                         return ret;
11422
11423                 vport++;
11424         }
11425
11426         return 0;
11427 }
11428
11429 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11430 {
11431         struct hclge_vport *vport = hclge_get_vport(handle);
11432         struct hclge_dev *hdev = vport->back;
11433         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11434         u32 new_trusted = enable ? 1 : 0;
11435         bool en_bc_pmc;
11436         int ret;
11437
11438         vport = hclge_get_vf_vport(hdev, vf);
11439         if (!vport)
11440                 return -EINVAL;
11441
11442         if (vport->vf_info.trusted == new_trusted)
11443                 return 0;
11444
11445         /* Disable promisc mode for VF if it is not trusted any more. */
11446         if (!enable && vport->vf_info.promisc_enable) {
11447                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11448                 ret = hclge_set_vport_promisc_mode(vport, false, false,
11449                                                    en_bc_pmc);
11450                 if (ret)
11451                         return ret;
11452                 vport->vf_info.promisc_enable = 0;
11453                 hclge_inform_vf_promisc_info(vport);
11454         }
11455
11456         vport->vf_info.trusted = new_trusted;
11457
11458         return 0;
11459 }
11460
11461 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11462 {
11463         int ret;
11464         int vf;
11465
11466         /* reset vf rate to default value */
11467         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11468                 struct hclge_vport *vport = &hdev->vport[vf];
11469
11470                 vport->vf_info.max_tx_rate = 0;
11471                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11472                 if (ret)
11473                         dev_err(&hdev->pdev->dev,
11474                                 "vf%d failed to reset to default, ret=%d\n",
11475                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11476         }
11477 }
11478
11479 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11480                                      int min_tx_rate, int max_tx_rate)
11481 {
11482         if (min_tx_rate != 0 ||
11483             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11484                 dev_err(&hdev->pdev->dev,
11485                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11486                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11487                 return -EINVAL;
11488         }
11489
11490         return 0;
11491 }
11492
11493 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11494                              int min_tx_rate, int max_tx_rate, bool force)
11495 {
11496         struct hclge_vport *vport = hclge_get_vport(handle);
11497         struct hclge_dev *hdev = vport->back;
11498         int ret;
11499
11500         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11501         if (ret)
11502                 return ret;
11503
11504         vport = hclge_get_vf_vport(hdev, vf);
11505         if (!vport)
11506                 return -EINVAL;
11507
11508         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11509                 return 0;
11510
11511         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11512         if (ret)
11513                 return ret;
11514
11515         vport->vf_info.max_tx_rate = max_tx_rate;
11516
11517         return 0;
11518 }
11519
11520 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11521 {
11522         struct hnae3_handle *handle = &hdev->vport->nic;
11523         struct hclge_vport *vport;
11524         int ret;
11525         int vf;
11526
11527         /* resume the vf max_tx_rate after reset */
11528         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11529                 vport = hclge_get_vf_vport(hdev, vf);
11530                 if (!vport)
11531                         return -EINVAL;
11532
11533                 /* zero means max rate; after reset, the firmware has already set
11534                  * it to max rate, so just continue.
11535                  */
11536                 if (!vport->vf_info.max_tx_rate)
11537                         continue;
11538
11539                 ret = hclge_set_vf_rate(handle, vf, 0,
11540                                         vport->vf_info.max_tx_rate, true);
11541                 if (ret) {
11542                         dev_err(&hdev->pdev->dev,
11543                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11544                                 vf, vport->vf_info.max_tx_rate, ret);
11545                         return ret;
11546                 }
11547         }
11548
11549         return 0;
11550 }
11551
11552 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11553 {
11554         struct hclge_vport *vport = hdev->vport;
11555         int i;
11556
11557         for (i = 0; i < hdev->num_alloc_vport; i++) {
11558                 hclge_vport_stop(vport);
11559                 vport++;
11560         }
11561 }
11562
11563 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11564 {
11565         struct hclge_dev *hdev = ae_dev->priv;
11566         struct pci_dev *pdev = ae_dev->pdev;
11567         int ret;
11568
11569         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11570
11571         hclge_stats_clear(hdev);
11572         /* NOTE: a PF reset doesn't need to clear or restore the PF and VF table
11573          * entries, so don't clean the tables in memory here.
11574          */
11575         if (hdev->reset_type == HNAE3_IMP_RESET ||
11576             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11577                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11578                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11579                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11580                 hclge_reset_umv_space(hdev);
11581         }
11582
11583         ret = hclge_cmd_init(hdev);
11584         if (ret) {
11585                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11586                 return ret;
11587         }
11588
11589         ret = hclge_map_tqp(hdev);
11590         if (ret) {
11591                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11592                 return ret;
11593         }
11594
11595         ret = hclge_mac_init(hdev);
11596         if (ret) {
11597                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11598                 return ret;
11599         }
11600
11601         ret = hclge_tp_port_init(hdev);
11602         if (ret) {
11603                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11604                         ret);
11605                 return ret;
11606         }
11607
11608         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11609         if (ret) {
11610                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11611                 return ret;
11612         }
11613
11614         ret = hclge_config_gro(hdev, true);
11615         if (ret)
11616                 return ret;
11617
11618         ret = hclge_init_vlan_config(hdev);
11619         if (ret) {
11620                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11621                 return ret;
11622         }
11623
11624         ret = hclge_tm_init_hw(hdev, true);
11625         if (ret) {
11626                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11627                 return ret;
11628         }
11629
11630         ret = hclge_rss_init_hw(hdev);
11631         if (ret) {
11632                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11633                 return ret;
11634         }
11635
11636         ret = init_mgr_tbl(hdev);
11637         if (ret) {
11638                 dev_err(&pdev->dev,
11639                         "failed to reinit manager table, ret = %d\n", ret);
11640                 return ret;
11641         }
11642
11643         ret = hclge_init_fd_config(hdev);
11644         if (ret) {
11645                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11646                 return ret;
11647         }
11648
11649         /* Log and clear the hw errors that have already occurred */
11650         hclge_handle_all_hns_hw_errors(ae_dev);
11651
11652         /* Re-enable the hw error interrupts because
11653          * the interrupts get disabled on global reset.
11654          */
11655         ret = hclge_config_nic_hw_error(hdev, true);
11656         if (ret) {
11657                 dev_err(&pdev->dev,
11658                         "fail(%d) to re-enable NIC hw error interrupts\n",
11659                         ret);
11660                 return ret;
11661         }
11662
11663         if (hdev->roce_client) {
11664                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11665                 if (ret) {
11666                         dev_err(&pdev->dev,
11667                                 "fail(%d) to re-enable roce ras interrupts\n",
11668                                 ret);
11669                         return ret;
11670                 }
11671         }
11672
11673         hclge_reset_vport_state(hdev);
11674         ret = hclge_reset_vport_spoofchk(hdev);
11675         if (ret)
11676                 return ret;
11677
11678         ret = hclge_resume_vf_rate(hdev);
11679         if (ret)
11680                 return ret;
11681
11682         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11683                  HCLGE_DRIVER_NAME);
11684
11685         return 0;
11686 }
11687
11688 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11689 {
11690         struct hclge_dev *hdev = ae_dev->priv;
11691         struct hclge_mac *mac = &hdev->hw.mac;
11692
11693         hclge_reset_vf_rate(hdev);
11694         hclge_clear_vf_vlan(hdev);
11695         hclge_misc_affinity_teardown(hdev);
11696         hclge_state_uninit(hdev);
11697         hclge_uninit_mac_table(hdev);
11698         hclge_del_all_fd_entries(hdev);
11699
11700         if (mac->phydev)
11701                 mdiobus_unregister(mac->mdio_bus);
11702
11703         /* Disable MISC vector(vector0) */
11704         hclge_enable_vector(&hdev->misc_vector, false);
11705         synchronize_irq(hdev->misc_vector.vector_irq);
11706
11707         /* Disable all hw interrupts */
11708         hclge_config_mac_tnl_int(hdev, false);
11709         hclge_config_nic_hw_error(hdev, false);
11710         hclge_config_rocee_ras_interrupt(hdev, false);
11711
11712         hclge_cmd_uninit(hdev);
11713         hclge_misc_irq_uninit(hdev);
11714         hclge_pci_uninit(hdev);
11715         mutex_destroy(&hdev->vport_lock);
11716         hclge_uninit_vport_vlan_table(hdev);
11717         ae_dev->priv = NULL;
11718 }
11719
11720 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11721 {
11722         struct hclge_vport *vport = hclge_get_vport(handle);
11723         struct hclge_dev *hdev = vport->back;
11724
11725         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11726 }
11727
11728 static void hclge_get_channels(struct hnae3_handle *handle,
11729                                struct ethtool_channels *ch)
11730 {
11731         ch->max_combined = hclge_get_max_channels(handle);
11732         ch->other_count = 1;
11733         ch->max_other = 1;
11734         ch->combined_count = handle->kinfo.rss_size;
11735 }
11736
11737 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11738                                         u16 *alloc_tqps, u16 *max_rss_size)
11739 {
11740         struct hclge_vport *vport = hclge_get_vport(handle);
11741         struct hclge_dev *hdev = vport->back;
11742
11743         *alloc_tqps = vport->alloc_tqps;
11744         *max_rss_size = hdev->pf_rss_size_max;
11745 }
11746
11747 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11748                               bool rxfh_configured)
11749 {
11750         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11751         struct hclge_vport *vport = hclge_get_vport(handle);
11752         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11753         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11754         struct hclge_dev *hdev = vport->back;
11755         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11756         u16 cur_rss_size = kinfo->rss_size;
11757         u16 cur_tqps = kinfo->num_tqps;
11758         u16 tc_valid[HCLGE_MAX_TC_NUM];
11759         u16 roundup_size;
11760         u32 *rss_indir;
11761         unsigned int i;
11762         int ret;
11763
11764         kinfo->req_rss_size = new_tqps_num;
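        /* the requested queue count takes effect when the TM vport mapping is
         * updated below
         */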
11765
11766         ret = hclge_tm_vport_map_update(hdev);
11767         if (ret) {
11768                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11769                 return ret;
11770         }
11771
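        /* the RSS tc_size is programmed as log2 of the rounded-up per-TC queue count */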
11772         roundup_size = roundup_pow_of_two(kinfo->rss_size);
11773         roundup_size = ilog2(roundup_size);
11774         /* Set the RSS TC mode according to the new RSS size */
11775         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11776                 tc_valid[i] = 0;
11777
11778                 if (!(hdev->hw_tc_map & BIT(i)))
11779                         continue;
11780
11781                 tc_valid[i] = 1;
11782                 tc_size[i] = roundup_size;
11783                 tc_offset[i] = kinfo->rss_size * i;
11784         }
11785         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11786         if (ret)
11787                 return ret;
11788
11789         /* RSS indirection table has been configured by the user */
11790         if (rxfh_configured)
11791                 goto out;
11792
11793         /* Reinitialize the RSS indirection table according to the new RSS size */
11794         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11795                             GFP_KERNEL);
11796         if (!rss_indir)
11797                 return -ENOMEM;
11798
11799         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11800                 rss_indir[i] = i % kinfo->rss_size;
11801
11802         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11803         if (ret)
11804                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11805                         ret);
11806
11807         kfree(rss_indir);
11808
11809 out:
11810         if (!ret)
11811                 dev_info(&hdev->pdev->dev,
11812                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
11813                          cur_rss_size, kinfo->rss_size,
11814                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11815
11816         return ret;
11817 }
11818
11819 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11820                               u32 *regs_num_64_bit)
11821 {
11822         struct hclge_desc desc;
11823         u32 total_num;
11824         int ret;
11825
11826         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11827         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11828         if (ret) {
11829                 dev_err(&hdev->pdev->dev,
11830                         "Query register number cmd failed, ret = %d.\n", ret);
11831                 return ret;
11832         }
11833
11834         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11835         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11836
11837         total_num = *regs_num_32_bit + *regs_num_64_bit;
11838         if (!total_num)
11839                 return -EINVAL;
11840
11841         return 0;
11842 }
11843
11844 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11845                                  void *data)
11846 {
11847 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11848 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11849
11850         struct hclge_desc *desc;
11851         u32 *reg_val = data;
11852         __le32 *desc_data;
11853         int nodata_num;
11854         int cmd_num;
11855         int i, k, n;
11856         int ret;
11857
11858         if (regs_num == 0)
11859                 return 0;
11860
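        /* the first BD returns nodata_num fewer register values than the others */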
11861         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11862         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11863                                HCLGE_32_BIT_REG_RTN_DATANUM);
11864         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11865         if (!desc)
11866                 return -ENOMEM;
11867
11868         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11869         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11870         if (ret) {
11871                 dev_err(&hdev->pdev->dev,
11872                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
11873                 kfree(desc);
11874                 return ret;
11875         }
11876
11877         for (i = 0; i < cmd_num; i++) {
11878                 if (i == 0) {
11879                         desc_data = (__le32 *)(&desc[i].data[0]);
11880                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11881                 } else {
11882                         desc_data = (__le32 *)(&desc[i]);
11883                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
11884                 }
11885                 for (k = 0; k < n; k++) {
11886                         *reg_val++ = le32_to_cpu(*desc_data++);
11887
11888                         regs_num--;
11889                         if (!regs_num)
11890                                 break;
11891                 }
11892         }
11893
11894         kfree(desc);
11895         return 0;
11896 }
11897
11898 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11899                                  void *data)
11900 {
11901 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11902 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11903
11904         struct hclge_desc *desc;
11905         u64 *reg_val = data;
11906         __le64 *desc_data;
11907         int nodata_len;
11908         int cmd_num;
11909         int i, k, n;
11910         int ret;
11911
11912         if (regs_num == 0)
11913                 return 0;
11914
11915         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11916         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11917                                HCLGE_64_BIT_REG_RTN_DATANUM);
11918         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11919         if (!desc)
11920                 return -ENOMEM;
11921
11922         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11923         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11924         if (ret) {
11925                 dev_err(&hdev->pdev->dev,
11926                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
11927                 kfree(desc);
11928                 return ret;
11929         }
11930
11931         for (i = 0; i < cmd_num; i++) {
11932                 if (i == 0) {
11933                         desc_data = (__le64 *)(&desc[i].data[0]);
11934                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11935                 } else {
11936                         desc_data = (__le64 *)(&desc[i]);
11937                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
11938                 }
11939                 for (k = 0; k < n; k++) {
11940                         *reg_val++ = le64_to_cpu(*desc_data++);
11941
11942                         regs_num--;
11943                         if (!regs_num)
11944                                 break;
11945                 }
11946         }
11947
11948         kfree(desc);
11949         return 0;
11950 }
11951
11952 #define MAX_SEPARATE_NUM        4
11953 #define SEPARATOR_VALUE         0xFDFCFBFA
11954 #define REG_NUM_PER_LINE        4
11955 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
11956 #define REG_SEPARATOR_LINE      1
11957 #define REG_NUM_REMAIN_MASK     3
11958
11959 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11960 {
11961         int i;
11962
11963         /* initialize all command BDs except the last one */
11964         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11965                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11966                                            true);
11967                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11968         }
11969
11970         /* initialize the last command BD */
11971         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11972
11973         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11974 }
11975
11976 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11977                                     int *bd_num_list,
11978                                     u32 type_num)
11979 {
11980         u32 entries_per_desc, desc_index, index, offset, i;
11981         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11982         int ret;
11983
11984         ret = hclge_query_bd_num_cmd_send(hdev, desc);
11985         if (ret) {
11986                 dev_err(&hdev->pdev->dev,
11987                         "Get dfx bd num fail, status is %d.\n", ret);
11988                 return ret;
11989         }
11990
11991         entries_per_desc = ARRAY_SIZE(desc[0].data);
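        /* each DFX type's BD count sits at a fixed offset within the returned
         * descriptor data
         */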
11992         for (i = 0; i < type_num; i++) {
11993                 offset = hclge_dfx_bd_offset_list[i];
11994                 index = offset % entries_per_desc;
11995                 desc_index = offset / entries_per_desc;
11996                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11997         }
11998
11999         return ret;
12000 }
12001
12002 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12003                                   struct hclge_desc *desc_src, int bd_num,
12004                                   enum hclge_opcode_type cmd)
12005 {
12006         struct hclge_desc *desc = desc_src;
12007         int i, ret;
12008
12009         hclge_cmd_setup_basic_desc(desc, cmd, true);
12010         for (i = 0; i < bd_num - 1; i++) {
12011                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12012                 desc++;
12013                 hclge_cmd_setup_basic_desc(desc, cmd, true);
12014         }
12015
12016         desc = desc_src;
12017         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12018         if (ret)
12019                 dev_err(&hdev->pdev->dev,
12020                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12021                         cmd, ret);
12022
12023         return ret;
12024 }
12025
12026 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12027                                     void *data)
12028 {
12029         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12030         struct hclge_desc *desc = desc_src;
12031         u32 *reg = data;
12032
12033         entries_per_desc = ARRAY_SIZE(desc->data);
12034         reg_num = entries_per_desc * bd_num;
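        /* append separator values to keep the dump aligned to REG_NUM_PER_LINE words */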
12035         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12036         for (i = 0; i < reg_num; i++) {
12037                 index = i % entries_per_desc;
12038                 desc_index = i / entries_per_desc;
12039                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12040         }
12041         for (i = 0; i < separator_num; i++)
12042                 *reg++ = SEPARATOR_VALUE;
12043
12044         return reg_num + separator_num;
12045 }
12046
12047 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12048 {
12049         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12050         int data_len_per_desc, bd_num, i;
12051         int *bd_num_list;
12052         u32 data_len;
12053         int ret;
12054
12055         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12056         if (!bd_num_list)
12057                 return -ENOMEM;
12058
12059         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12060         if (ret) {
12061                 dev_err(&hdev->pdev->dev,
12062                         "Get dfx reg bd num fail, status is %d.\n", ret);
12063                 goto out;
12064         }
12065
12066         data_len_per_desc = sizeof_field(struct hclge_desc, data);
12067         *len = 0;
12068         for (i = 0; i < dfx_reg_type_num; i++) {
12069                 bd_num = bd_num_list[i];
12070                 data_len = data_len_per_desc * bd_num;
12071                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12072         }
12073
12074 out:
12075         kfree(bd_num_list);
12076         return ret;
12077 }
12078
12079 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12080 {
12081         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12082         int bd_num, bd_num_max, buf_len, i;
12083         struct hclge_desc *desc_src;
12084         int *bd_num_list;
12085         u32 *reg = data;
12086         int ret;
12087
12088         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12089         if (!bd_num_list)
12090                 return -ENOMEM;
12091
12092         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12093         if (ret) {
12094                 dev_err(&hdev->pdev->dev,
12095                         "Get dfx reg bd num fail, status is %d.\n", ret);
12096                 goto out;
12097         }
12098
12099         bd_num_max = bd_num_list[0];
12100         for (i = 1; i < dfx_reg_type_num; i++)
12101                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12102
12103         buf_len = sizeof(*desc_src) * bd_num_max;
12104         desc_src = kzalloc(buf_len, GFP_KERNEL);
12105         if (!desc_src) {
12106                 ret = -ENOMEM;
12107                 goto out;
12108         }
12109
12110         for (i = 0; i < dfx_reg_type_num; i++) {
12111                 bd_num = bd_num_list[i];
12112                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12113                                              hclge_dfx_reg_opcode_list[i]);
12114                 if (ret) {
12115                         dev_err(&hdev->pdev->dev,
12116                                 "Get dfx reg fail, status is %d.\n", ret);
12117                         break;
12118                 }
12119
12120                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12121         }
12122
12123         kfree(desc_src);
12124 out:
12125         kfree(bd_num_list);
12126         return ret;
12127 }
12128
12129 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12130                               struct hnae3_knic_private_info *kinfo)
12131 {
12132 #define HCLGE_RING_REG_OFFSET           0x200
12133 #define HCLGE_RING_INT_REG_OFFSET       0x4
12134
12135         int i, j, reg_num, separator_num;
12136         int data_num_sum;
12137         u32 *reg = data;
12138
12139         /* fetch per-PF register values from the PF PCIe register space */
12140         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12141         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12142         for (i = 0; i < reg_num; i++)
12143                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12144         for (i = 0; i < separator_num; i++)
12145                 *reg++ = SEPARATOR_VALUE;
12146         data_num_sum = reg_num + separator_num;
12147
12148         reg_num = ARRAY_SIZE(common_reg_addr_list);
12149         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12150         for (i = 0; i < reg_num; i++)
12151                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12152         for (i = 0; i < separator_num; i++)
12153                 *reg++ = SEPARATOR_VALUE;
12154         data_num_sum += reg_num + separator_num;
12155
12156         reg_num = ARRAY_SIZE(ring_reg_addr_list);
12157         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12158         for (j = 0; j < kinfo->num_tqps; j++) {
12159                 for (i = 0; i < reg_num; i++)
12160                         *reg++ = hclge_read_dev(&hdev->hw,
12161                                                 ring_reg_addr_list[i] +
12162                                                 HCLGE_RING_REG_OFFSET * j);
12163                 for (i = 0; i < separator_num; i++)
12164                         *reg++ = SEPARATOR_VALUE;
12165         }
12166         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12167
12168         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12169         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12170         for (j = 0; j < hdev->num_msi_used - 1; j++) {
12171                 for (i = 0; i < reg_num; i++)
12172                         *reg++ = hclge_read_dev(&hdev->hw,
12173                                                 tqp_intr_reg_addr_list[i] +
12174                                                 HCLGE_RING_INT_REG_OFFSET * j);
12175                 for (i = 0; i < separator_num; i++)
12176                         *reg++ = SEPARATOR_VALUE;
12177         }
12178         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12179
12180         return data_num_sum;
12181 }
12182
12183 static int hclge_get_regs_len(struct hnae3_handle *handle)
12184 {
12185         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12186         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12187         struct hclge_vport *vport = hclge_get_vport(handle);
12188         struct hclge_dev *hdev = vport->back;
12189         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12190         int regs_lines_32_bit, regs_lines_64_bit;
12191         int ret;
12192
12193         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12194         if (ret) {
12195                 dev_err(&hdev->pdev->dev,
12196                         "Get register number failed, ret = %d.\n", ret);
12197                 return ret;
12198         }
12199
12200         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12201         if (ret) {
12202                 dev_err(&hdev->pdev->dev,
12203                         "Get dfx reg len failed, ret = %d.\n", ret);
12204                 return ret;
12205         }
12206
12207         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12208                 REG_SEPARATOR_LINE;
12209         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12210                 REG_SEPARATOR_LINE;
12211         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12212                 REG_SEPARATOR_LINE;
12213         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12214                 REG_SEPARATOR_LINE;
12215         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12216                 REG_SEPARATOR_LINE;
12217         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12218                 REG_SEPARATOR_LINE;
12219
12220         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12221                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12222                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12223 }
12224
12225 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12226                            void *data)
12227 {
12228         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12229         struct hclge_vport *vport = hclge_get_vport(handle);
12230         struct hclge_dev *hdev = vport->back;
12231         u32 regs_num_32_bit, regs_num_64_bit;
12232         int i, reg_num, separator_num, ret;
12233         u32 *reg = data;
12234
12235         *version = hdev->fw_version;
12236
12237         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12238         if (ret) {
12239                 dev_err(&hdev->pdev->dev,
12240                         "Get register number failed, ret = %d.\n", ret);
12241                 return;
12242         }
12243
12244         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12245
12246         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12247         if (ret) {
12248                 dev_err(&hdev->pdev->dev,
12249                         "Get 32 bit register failed, ret = %d.\n", ret);
12250                 return;
12251         }
12252         reg_num = regs_num_32_bit;
12253         reg += reg_num;
12254         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12255         for (i = 0; i < separator_num; i++)
12256                 *reg++ = SEPARATOR_VALUE;
12257
12258         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12259         if (ret) {
12260                 dev_err(&hdev->pdev->dev,
12261                         "Get 64 bit register failed, ret = %d.\n", ret);
12262                 return;
12263         }
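        /* each 64-bit register occupies two u32 entries in the output buffer */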
12264         reg_num = regs_num_64_bit * 2;
12265         reg += reg_num;
12266         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12267         for (i = 0; i < separator_num; i++)
12268                 *reg++ = SEPARATOR_VALUE;
12269
12270         ret = hclge_get_dfx_reg(hdev, reg);
12271         if (ret)
12272                 dev_err(&hdev->pdev->dev,
12273                         "Get dfx register failed, ret = %d.\n", ret);
12274 }
12275
12276 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12277 {
12278         struct hclge_set_led_state_cmd *req;
12279         struct hclge_desc desc;
12280         int ret;
12281
12282         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12283
12284         req = (struct hclge_set_led_state_cmd *)desc.data;
12285         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12286                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12287
12288         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12289         if (ret)
12290                 dev_err(&hdev->pdev->dev,
12291                         "Send set led state cmd error, ret =%d\n", ret);
12292
12293         return ret;
12294 }
12295
12296 enum hclge_led_status {
12297         HCLGE_LED_OFF,
12298         HCLGE_LED_ON,
12299         HCLGE_LED_NO_CHANGE = 0xFF,
12300 };
12301
12302 static int hclge_set_led_id(struct hnae3_handle *handle,
12303                             enum ethtool_phys_id_state status)
12304 {
12305         struct hclge_vport *vport = hclge_get_vport(handle);
12306         struct hclge_dev *hdev = vport->back;
12307
12308         switch (status) {
12309         case ETHTOOL_ID_ACTIVE:
12310                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12311         case ETHTOOL_ID_INACTIVE:
12312                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12313         default:
12314                 return -EINVAL;
12315         }
12316 }
12317
12318 static void hclge_get_link_mode(struct hnae3_handle *handle,
12319                                 unsigned long *supported,
12320                                 unsigned long *advertising)
12321 {
12322         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12323         struct hclge_vport *vport = hclge_get_vport(handle);
12324         struct hclge_dev *hdev = vport->back;
12325         unsigned int idx = 0;
12326
12327         for (; idx < size; idx++) {
12328                 supported[idx] = hdev->hw.mac.supported[idx];
12329                 advertising[idx] = hdev->hw.mac.advertising[idx];
12330         }
12331 }
12332
12333 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12334 {
12335         struct hclge_vport *vport = hclge_get_vport(handle);
12336         struct hclge_dev *hdev = vport->back;
12337
12338         return hclge_config_gro(hdev, enable);
12339 }
12340
12341 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12342 {
12343         struct hclge_vport *vport = &hdev->vport[0];
12344         struct hnae3_handle *handle = &vport->nic;
12345         u8 tmp_flags;
12346         int ret;
12347
12348         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12349                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12350                 vport->last_promisc_flags = vport->overflow_promisc_flags;
12351         }
12352
12353         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
12354                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12355                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12356                                              tmp_flags & HNAE3_MPE);
12357                 if (!ret) {
12358                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
12359                         hclge_enable_vlan_filter(handle,
12360                                                  tmp_flags & HNAE3_VLAN_FLTR);
12361                 }
12362         }
12363 }
12364
12365 static bool hclge_module_existed(struct hclge_dev *hdev)
12366 {
12367         struct hclge_desc desc;
12368         u32 existed;
12369         int ret;
12370
12371         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12372         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12373         if (ret) {
12374                 dev_err(&hdev->pdev->dev,
12375                         "failed to get SFP exist state, ret = %d\n", ret);
12376                 return false;
12377         }
12378
12379         existed = le32_to_cpu(desc.data[0]);
12380
12381         return existed != 0;
12382 }
12383
12384 /* need 6 BDs (140 bytes in total) in one read;
12385  * return the number of bytes actually read, 0 means the read failed.
12386  */
12387 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12388                                      u32 len, u8 *data)
12389 {
12390         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12391         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12392         u16 read_len;
12393         u16 copy_len;
12394         int ret;
12395         int i;
12396
12397         /* setup all 6 bds to read module eeprom info. */
12398         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12399                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12400                                            true);
12401
12402                 /* bd0~bd4 need next flag */
12403                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12404                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12405         }
12406
12407         /* set up bd0; this bd contains the offset and read length. */
12408         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12409         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12410         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12411         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12412
12413         ret = hclge_cmd_send(&hdev->hw, desc, i);
12414         if (ret) {
12415                 dev_err(&hdev->pdev->dev,
12416                         "failed to get SFP eeprom info, ret = %d\n", ret);
12417                 return 0;
12418         }
12419
12420         /* copy sfp info from bd0 to out buffer. */
12421         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12422         memcpy(data, sfp_info_bd0->data, copy_len);
12423         read_len = copy_len;
12424
12425         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12426         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12427                 if (read_len >= len)
12428                         return read_len;
12429
12430                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12431                 memcpy(data + read_len, desc[i].data, copy_len);
12432                 read_len += copy_len;
12433         }
12434
12435         return read_len;
12436 }
12437
12438 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12439                                    u32 len, u8 *data)
12440 {
12441         struct hclge_vport *vport = hclge_get_vport(handle);
12442         struct hclge_dev *hdev = vport->back;
12443         u32 read_len = 0;
12444         u16 data_len;
12445
12446         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12447                 return -EOPNOTSUPP;
12448
12449         if (!hclge_module_existed(hdev))
12450                 return -ENXIO;
12451
12452         while (read_len < len) {
12453                 data_len = hclge_get_sfp_eeprom_info(hdev,
12454                                                      offset + read_len,
12455                                                      len - read_len,
12456                                                      data + read_len);
12457                 if (!data_len)
12458                         return -EIO;
12459
12460                 read_len += data_len;
12461         }
12462
12463         return 0;
12464 }
12465
12466 static const struct hnae3_ae_ops hclge_ops = {
12467         .init_ae_dev = hclge_init_ae_dev,
12468         .uninit_ae_dev = hclge_uninit_ae_dev,
12469         .flr_prepare = hclge_flr_prepare,
12470         .flr_done = hclge_flr_done,
12471         .init_client_instance = hclge_init_client_instance,
12472         .uninit_client_instance = hclge_uninit_client_instance,
12473         .map_ring_to_vector = hclge_map_ring_to_vector,
12474         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12475         .get_vector = hclge_get_vector,
12476         .put_vector = hclge_put_vector,
12477         .set_promisc_mode = hclge_set_promisc_mode,
12478         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12479         .set_loopback = hclge_set_loopback,
12480         .start = hclge_ae_start,
12481         .stop = hclge_ae_stop,
12482         .client_start = hclge_client_start,
12483         .client_stop = hclge_client_stop,
12484         .get_status = hclge_get_status,
12485         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12486         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12487         .get_media_type = hclge_get_media_type,
12488         .check_port_speed = hclge_check_port_speed,
12489         .get_fec = hclge_get_fec,
12490         .set_fec = hclge_set_fec,
12491         .get_rss_key_size = hclge_get_rss_key_size,
12492         .get_rss = hclge_get_rss,
12493         .set_rss = hclge_set_rss,
12494         .set_rss_tuple = hclge_set_rss_tuple,
12495         .get_rss_tuple = hclge_get_rss_tuple,
12496         .get_tc_size = hclge_get_tc_size,
12497         .get_mac_addr = hclge_get_mac_addr,
12498         .set_mac_addr = hclge_set_mac_addr,
12499         .do_ioctl = hclge_do_ioctl,
12500         .add_uc_addr = hclge_add_uc_addr,
12501         .rm_uc_addr = hclge_rm_uc_addr,
12502         .add_mc_addr = hclge_add_mc_addr,
12503         .rm_mc_addr = hclge_rm_mc_addr,
12504         .set_autoneg = hclge_set_autoneg,
12505         .get_autoneg = hclge_get_autoneg,
12506         .restart_autoneg = hclge_restart_autoneg,
12507         .halt_autoneg = hclge_halt_autoneg,
12508         .get_pauseparam = hclge_get_pauseparam,
12509         .set_pauseparam = hclge_set_pauseparam,
12510         .set_mtu = hclge_set_mtu,
12511         .reset_queue = hclge_reset_tqp,
12512         .get_stats = hclge_get_stats,
12513         .get_mac_stats = hclge_get_mac_stat,
12514         .update_stats = hclge_update_stats,
12515         .get_strings = hclge_get_strings,
12516         .get_sset_count = hclge_get_sset_count,
12517         .get_fw_version = hclge_get_fw_version,
12518         .get_mdix_mode = hclge_get_mdix_mode,
12519         .enable_vlan_filter = hclge_enable_vlan_filter,
12520         .set_vlan_filter = hclge_set_vlan_filter,
12521         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12522         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12523         .reset_event = hclge_reset_event,
12524         .get_reset_level = hclge_get_reset_level,
12525         .set_default_reset_request = hclge_set_def_reset_request,
12526         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12527         .set_channels = hclge_set_channels,
12528         .get_channels = hclge_get_channels,
12529         .get_regs_len = hclge_get_regs_len,
12530         .get_regs = hclge_get_regs,
12531         .set_led_id = hclge_set_led_id,
12532         .get_link_mode = hclge_get_link_mode,
12533         .add_fd_entry = hclge_add_fd_entry,
12534         .del_fd_entry = hclge_del_fd_entry,
12535         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12536         .get_fd_rule_info = hclge_get_fd_rule_info,
12537         .get_fd_all_rules = hclge_get_all_rules,
12538         .enable_fd = hclge_enable_fd,
12539         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12540         .dbg_run_cmd = hclge_dbg_run_cmd,
12541         .dbg_read_cmd = hclge_dbg_read_cmd,
12542         .handle_hw_ras_error = hclge_handle_hw_ras_error,
12543         .get_hw_reset_stat = hclge_get_hw_reset_stat,
12544         .ae_dev_resetting = hclge_ae_dev_resetting,
12545         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12546         .set_gro_en = hclge_gro_en,
12547         .get_global_queue_id = hclge_covert_handle_qid_global,
12548         .set_timer_task = hclge_set_timer_task,
12549         .mac_connect_phy = hclge_mac_connect_phy,
12550         .mac_disconnect_phy = hclge_mac_disconnect_phy,
12551         .get_vf_config = hclge_get_vf_config,
12552         .set_vf_link_state = hclge_set_vf_link_state,
12553         .set_vf_spoofchk = hclge_set_vf_spoofchk,
12554         .set_vf_trust = hclge_set_vf_trust,
12555         .set_vf_rate = hclge_set_vf_rate,
12556         .set_vf_mac = hclge_set_vf_mac,
12557         .get_module_eeprom = hclge_get_module_eeprom,
12558         .get_cmdq_stat = hclge_get_cmdq_stat,
12559         .add_cls_flower = hclge_add_cls_flower,
12560         .del_cls_flower = hclge_del_cls_flower,
12561         .cls_flower_active = hclge_is_cls_flower_active,
12562         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12563         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12564 };
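/*
 * This op table is what the hnae3 framework sees for every PF managed by
 * this driver; it is exported through the ae_algo structure below. Client
 * modules reach these callbacks indirectly through the handle, roughly as
 * in this illustrative (non-compiled) fragment:
 *
 *	if (handle->ae_algo->ops->get_status)
 *		link_status = handle->ae_algo->ops->get_status(handle);
 */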
12565
12566 static struct hnae3_ae_algo ae_algo = {
12567         .ops = &hclge_ops,
12568         .pdev_id_table = ae_algo_pci_tbl,
12569 };
12570
12571 static int hclge_init(void)
12572 {
12573         pr_info("%s is initializing\n", HCLGE_NAME);
12574
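	/* Driver-wide workqueue used to run each device's service task
	 * (periodic housekeeping, reset and mailbox handling).
	 */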
12575         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12576         if (!hclge_wq) {
12577                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12578                 return -ENOMEM;
12579         }
12580
12581         hnae3_register_ae_algo(&ae_algo);
12582
12583         return 0;
12584 }
12585
12586 static void hclge_exit(void)
12587 {
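	/* Tear down in reverse order of hclge_init(): unregister from the
	 * hnae3 framework first so no further work is queued, then destroy
	 * the workqueue.
	 */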
12588         hnae3_unregister_ae_algo(&ae_algo);
12589         destroy_workqueue(hclge_wq);
12590 }
12591 module_init(hclge_init);
12592 module_exit(hclge_exit);
12593
12594 MODULE_LICENSE("GPL");
12595 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12596 MODULE_DESCRIPTION("HCLGE Driver");
12597 MODULE_VERSION(HCLGE_MOD_VERSION);