drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
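   /* read a u64 statistic located 'offset' bytes into the stats structure 'p' */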
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68                                                    unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74
75 static struct hnae3_ae_algo ae_algo;
76
77 static struct workqueue_struct *hclge_wq;
78
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88         /* required last entry */
89         {0, }
90 };
91
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95                                          HCLGE_CMDQ_TX_ADDR_H_REG,
96                                          HCLGE_CMDQ_TX_DEPTH_REG,
97                                          HCLGE_CMDQ_TX_TAIL_REG,
98                                          HCLGE_CMDQ_TX_HEAD_REG,
99                                          HCLGE_CMDQ_RX_ADDR_L_REG,
100                                          HCLGE_CMDQ_RX_ADDR_H_REG,
101                                          HCLGE_CMDQ_RX_DEPTH_REG,
102                                          HCLGE_CMDQ_RX_TAIL_REG,
103                                          HCLGE_CMDQ_RX_HEAD_REG,
104                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
105                                          HCLGE_CMDQ_INTR_STS_REG,
106                                          HCLGE_CMDQ_INTR_EN_REG,
107                                          HCLGE_CMDQ_INTR_GEN_REG};
108
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110                                            HCLGE_VECTOR0_OTER_EN_REG,
111                                            HCLGE_MISC_RESET_STS_REG,
112                                            HCLGE_MISC_VECTOR_INT_STS,
113                                            HCLGE_GLOBAL_RESET_REG,
114                                            HCLGE_FUN_RST_ING,
115                                            HCLGE_GRO_EN_REG};
116
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118                                          HCLGE_RING_RX_ADDR_H_REG,
119                                          HCLGE_RING_RX_BD_NUM_REG,
120                                          HCLGE_RING_RX_BD_LENGTH_REG,
121                                          HCLGE_RING_RX_MERGE_EN_REG,
122                                          HCLGE_RING_RX_TAIL_REG,
123                                          HCLGE_RING_RX_HEAD_REG,
124                                          HCLGE_RING_RX_FBD_NUM_REG,
125                                          HCLGE_RING_RX_OFFSET_REG,
126                                          HCLGE_RING_RX_FBD_OFFSET_REG,
127                                          HCLGE_RING_RX_STASH_REG,
128                                          HCLGE_RING_RX_BD_ERR_REG,
129                                          HCLGE_RING_TX_ADDR_L_REG,
130                                          HCLGE_RING_TX_ADDR_H_REG,
131                                          HCLGE_RING_TX_BD_NUM_REG,
132                                          HCLGE_RING_TX_PRIORITY_REG,
133                                          HCLGE_RING_TX_TC_REG,
134                                          HCLGE_RING_TX_MERGE_EN_REG,
135                                          HCLGE_RING_TX_TAIL_REG,
136                                          HCLGE_RING_TX_HEAD_REG,
137                                          HCLGE_RING_TX_FBD_NUM_REG,
138                                          HCLGE_RING_TX_OFFSET_REG,
139                                          HCLGE_RING_TX_EBD_NUM_REG,
140                                          HCLGE_RING_TX_EBD_OFFSET_REG,
141                                          HCLGE_RING_TX_BD_ERR_REG,
142                                          HCLGE_RING_EN_REG};
143
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145                                              HCLGE_TQP_INTR_GL0_REG,
146                                              HCLGE_TQP_INTR_GL1_REG,
147                                              HCLGE_TQP_INTR_GL2_REG,
148                                              HCLGE_TQP_INTR_RL_REG};
149
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151         "App    Loopback test",
152         "Serdes serial Loopback test",
153         "Serdes parallel Loopback test",
154         "Phy    Loopback test"
155 };
156
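    /* ethtool stat name and its byte offset within struct hclge_mac_stats */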
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158         {"mac_tx_mac_pause_num",
159                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160         {"mac_rx_mac_pause_num",
161                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162         {"mac_tx_control_pkt_num",
163                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164         {"mac_rx_control_pkt_num",
165                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166         {"mac_tx_pfc_pkt_num",
167                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168         {"mac_tx_pfc_pri0_pkt_num",
169                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170         {"mac_tx_pfc_pri1_pkt_num",
171                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172         {"mac_tx_pfc_pri2_pkt_num",
173                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174         {"mac_tx_pfc_pri3_pkt_num",
175                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176         {"mac_tx_pfc_pri4_pkt_num",
177                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178         {"mac_tx_pfc_pri5_pkt_num",
179                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180         {"mac_tx_pfc_pri6_pkt_num",
181                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182         {"mac_tx_pfc_pri7_pkt_num",
183                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184         {"mac_rx_pfc_pkt_num",
185                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186         {"mac_rx_pfc_pri0_pkt_num",
187                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188         {"mac_rx_pfc_pri1_pkt_num",
189                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190         {"mac_rx_pfc_pri2_pkt_num",
191                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192         {"mac_rx_pfc_pri3_pkt_num",
193                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194         {"mac_rx_pfc_pri4_pkt_num",
195                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196         {"mac_rx_pfc_pri5_pkt_num",
197                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198         {"mac_rx_pfc_pri6_pkt_num",
199                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200         {"mac_rx_pfc_pri7_pkt_num",
201                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202         {"mac_tx_total_pkt_num",
203                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204         {"mac_tx_total_oct_num",
205                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206         {"mac_tx_good_pkt_num",
207                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208         {"mac_tx_bad_pkt_num",
209                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210         {"mac_tx_good_oct_num",
211                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212         {"mac_tx_bad_oct_num",
213                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214         {"mac_tx_uni_pkt_num",
215                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216         {"mac_tx_multi_pkt_num",
217                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218         {"mac_tx_broad_pkt_num",
219                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220         {"mac_tx_undersize_pkt_num",
221                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222         {"mac_tx_oversize_pkt_num",
223                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224         {"mac_tx_64_oct_pkt_num",
225                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226         {"mac_tx_65_127_oct_pkt_num",
227                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228         {"mac_tx_128_255_oct_pkt_num",
229                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230         {"mac_tx_256_511_oct_pkt_num",
231                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232         {"mac_tx_512_1023_oct_pkt_num",
233                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234         {"mac_tx_1024_1518_oct_pkt_num",
235                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236         {"mac_tx_1519_2047_oct_pkt_num",
237                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238         {"mac_tx_2048_4095_oct_pkt_num",
239                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240         {"mac_tx_4096_8191_oct_pkt_num",
241                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242         {"mac_tx_8192_9216_oct_pkt_num",
243                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244         {"mac_tx_9217_12287_oct_pkt_num",
245                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246         {"mac_tx_12288_16383_oct_pkt_num",
247                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248         {"mac_tx_1519_max_good_pkt_num",
249                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250         {"mac_tx_1519_max_bad_pkt_num",
251                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252         {"mac_rx_total_pkt_num",
253                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254         {"mac_rx_total_oct_num",
255                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256         {"mac_rx_good_pkt_num",
257                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258         {"mac_rx_bad_pkt_num",
259                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260         {"mac_rx_good_oct_num",
261                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262         {"mac_rx_bad_oct_num",
263                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264         {"mac_rx_uni_pkt_num",
265                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266         {"mac_rx_multi_pkt_num",
267                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268         {"mac_rx_broad_pkt_num",
269                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270         {"mac_rx_undersize_pkt_num",
271                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272         {"mac_rx_oversize_pkt_num",
273                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274         {"mac_rx_64_oct_pkt_num",
275                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276         {"mac_rx_65_127_oct_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278         {"mac_rx_128_255_oct_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280         {"mac_rx_256_511_oct_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282         {"mac_rx_512_1023_oct_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284         {"mac_rx_1024_1518_oct_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286         {"mac_rx_1519_2047_oct_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288         {"mac_rx_2048_4095_oct_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290         {"mac_rx_4096_8191_oct_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292         {"mac_rx_8192_9216_oct_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294         {"mac_rx_9217_12287_oct_pkt_num",
295                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296         {"mac_rx_12288_16383_oct_pkt_num",
297                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298         {"mac_rx_1519_max_good_pkt_num",
299                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300         {"mac_rx_1519_max_bad_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302
303         {"mac_tx_fragment_pkt_num",
304                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305         {"mac_tx_undermin_pkt_num",
306                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307         {"mac_tx_jabber_pkt_num",
308                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309         {"mac_tx_err_all_pkt_num",
310                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311         {"mac_tx_from_app_good_pkt_num",
312                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313         {"mac_tx_from_app_bad_pkt_num",
314                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315         {"mac_rx_fragment_pkt_num",
316                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317         {"mac_rx_undermin_pkt_num",
318                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319         {"mac_rx_jabber_pkt_num",
320                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321         {"mac_rx_fcs_err_pkt_num",
322                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323         {"mac_rx_send_app_good_pkt_num",
324                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325         {"mac_rx_send_app_bad_pkt_num",
326                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330         {
331                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
333                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334                 .i_port_bitmap = 0x1,
335         },
336 };
337
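    /* default RSS hash key used when initialising RSS */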
338 static const u8 hclge_hash_key[] = {
339         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345
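    /* each entry pairs with the opcode at the same index in
     * hclge_dfx_reg_opcode_list below
     */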
346 static const u32 hclge_dfx_bd_offset_list[] = {
347         HCLGE_DFX_BIOS_BD_OFFSET,
348         HCLGE_DFX_SSU_0_BD_OFFSET,
349         HCLGE_DFX_SSU_1_BD_OFFSET,
350         HCLGE_DFX_IGU_BD_OFFSET,
351         HCLGE_DFX_RPU_0_BD_OFFSET,
352         HCLGE_DFX_RPU_1_BD_OFFSET,
353         HCLGE_DFX_NCSI_BD_OFFSET,
354         HCLGE_DFX_RTC_BD_OFFSET,
355         HCLGE_DFX_PPP_BD_OFFSET,
356         HCLGE_DFX_RCB_BD_OFFSET,
357         HCLGE_DFX_TQP_BD_OFFSET,
358         HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362         HCLGE_OPC_DFX_BIOS_COMMON_REG,
363         HCLGE_OPC_DFX_SSU_REG_0,
364         HCLGE_OPC_DFX_SSU_REG_1,
365         HCLGE_OPC_DFX_IGU_EGU_REG,
366         HCLGE_OPC_DFX_RPU_REG_0,
367         HCLGE_OPC_DFX_RPU_REG_1,
368         HCLGE_OPC_DFX_NCSI_REG,
369         HCLGE_OPC_DFX_RTC_REG,
370         HCLGE_OPC_DFX_PPP_REG,
371         HCLGE_OPC_DFX_RCB_REG,
372         HCLGE_OPC_DFX_TQP_REG,
373         HCLGE_OPC_DFX_SSU_REG_2
374 };
375
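    /* flow director key fields and their widths in bits */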
376 static const struct key_info meta_data_key_info[] = {
377         { PACKET_TYPE_ID, 6},
378         { IP_FRAGEMENT, 1},
379         { ROCE_TYPE, 1},
380         { NEXT_KEY, 5},
381         { VLAN_NUMBER, 2},
382         { SRC_VPORT, 12},
383         { DST_VPORT, 12},
384         { TUNNEL_PACKET, 1},
385 };
386
387 static const struct key_info tuple_key_info[] = {
388         { OUTER_DST_MAC, 48},
389         { OUTER_SRC_MAC, 48},
390         { OUTER_VLAN_TAG_FST, 16},
391         { OUTER_VLAN_TAG_SEC, 16},
392         { OUTER_ETH_TYPE, 16},
393         { OUTER_L2_RSV, 16},
394         { OUTER_IP_TOS, 8},
395         { OUTER_IP_PROTO, 8},
396         { OUTER_SRC_IP, 32},
397         { OUTER_DST_IP, 32},
398         { OUTER_L3_RSV, 16},
399         { OUTER_SRC_PORT, 16},
400         { OUTER_DST_PORT, 16},
401         { OUTER_L4_RSV, 32},
402         { OUTER_TUN_VNI, 24},
403         { OUTER_TUN_FLOW_ID, 8},
404         { INNER_DST_MAC, 48},
405         { INNER_SRC_MAC, 48},
406         { INNER_VLAN_TAG_FST, 16},
407         { INNER_VLAN_TAG_SEC, 16},
408         { INNER_ETH_TYPE, 16},
409         { INNER_L2_RSV, 16},
410         { INNER_IP_TOS, 8},
411         { INNER_IP_PROTO, 8},
412         { INNER_SRC_IP, 32},
413         { INNER_DST_IP, 32},
414         { INNER_L3_RSV, 16},
415         { INNER_SRC_PORT, 16},
416         { INNER_DST_PORT, 16},
417         { INNER_L4_RSV, 32},
418 };
419
420 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
421 {
422 #define HCLGE_MAC_CMD_NUM 21
423
424         u64 *data = (u64 *)(&hdev->mac_stats);
425         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
426         __le64 *desc_data;
427         int i, k, n;
428         int ret;
429
430         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
431         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
432         if (ret) {
433                 dev_err(&hdev->pdev->dev,
434                         "Get MAC pkt stats fail, status = %d.\n", ret);
435
436                 return ret;
437         }
438
439         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
440                 /* for special opcode 0032, only the first desc has the head */
441                 if (unlikely(i == 0)) {
442                         desc_data = (__le64 *)(&desc[i].data[0]);
443                         n = HCLGE_RD_FIRST_STATS_NUM;
444                 } else {
445                         desc_data = (__le64 *)(&desc[i]);
446                         n = HCLGE_RD_OTHER_STATS_NUM;
447                 }
448
449                 for (k = 0; k < n; k++) {
450                         *data += le64_to_cpu(*desc_data);
451                         data++;
452                         desc_data++;
453                 }
454         }
455
456         return 0;
457 }
458
459 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
460 {
461         u64 *data = (u64 *)(&hdev->mac_stats);
462         struct hclge_desc *desc;
463         __le64 *desc_data;
464         u16 i, k, n;
465         int ret;
466
467         /* This may be called inside atomic sections,
468          * so GFP_ATOMIC is more suitable here
469          */
470         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471         if (!desc)
472                 return -ENOMEM;
473
474         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
475         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
476         if (ret) {
477                 kfree(desc);
478                 return ret;
479         }
480
481         for (i = 0; i < desc_num; i++) {
482                 /* for special opcode 0034, only the first desc has the head */
483                 if (i == 0) {
484                         desc_data = (__le64 *)(&desc[i].data[0]);
485                         n = HCLGE_RD_FIRST_STATS_NUM;
486                 } else {
487                         desc_data = (__le64 *)(&desc[i]);
488                         n = HCLGE_RD_OTHER_STATS_NUM;
489                 }
490
491                 for (k = 0; k < n; k++) {
492                         *data += le64_to_cpu(*desc_data);
493                         data++;
494                         desc_data++;
495                 }
496         }
497
498         kfree(desc);
499
500         return 0;
501 }
502
503 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
504 {
505         struct hclge_desc desc;
506         __le32 *desc_data;
507         u32 reg_num;
508         int ret;
509
510         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
511         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512         if (ret)
513                 return ret;
514
515         desc_data = (__le32 *)(&desc.data[0]);
516         reg_num = le32_to_cpu(*desc_data);
517
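            /* the first BD carries 3 stats registers and each further BD
             * carries 4, so work out how many BDs are needed (rounded up)
             */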
518         *desc_num = 1 + ((reg_num - 3) >> 2) +
519                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
520
521         return 0;
522 }
523
524 static int hclge_mac_update_stats(struct hclge_dev *hdev)
525 {
526         u32 desc_num;
527         int ret;
528
529         ret = hclge_mac_query_reg_num(hdev, &desc_num);
530
531         /* The firmware supports the new statistics acquisition method */
532         if (!ret)
533                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
534         else if (ret == -EOPNOTSUPP)
535                 ret = hclge_mac_update_stats_defective(hdev);
536         else
537                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
538
539         return ret;
540 }
541
542 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
543 {
544         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
545         struct hclge_vport *vport = hclge_get_vport(handle);
546         struct hclge_dev *hdev = vport->back;
547         struct hnae3_queue *queue;
548         struct hclge_desc desc[1];
549         struct hclge_tqp *tqp;
550         int ret, i;
551
552         for (i = 0; i < kinfo->num_tqps; i++) {
553                 queue = handle->kinfo.tqp[i];
554                 tqp = container_of(queue, struct hclge_tqp, q);
555                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
556                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
557                                            true);
558
559                 desc[0].data[0] = cpu_to_le32(tqp->index);
560                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
561                 if (ret) {
562                         dev_err(&hdev->pdev->dev,
563                                 "Query tqp stat fail, status = %d, queue = %d\n",
564                                 ret, i);
565                         return ret;
566                 }
567                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
568                         le32_to_cpu(desc[0].data[1]);
569         }
570
571         for (i = 0; i < kinfo->num_tqps; i++) {
572                 queue = handle->kinfo.tqp[i];
573                 tqp = container_of(queue, struct hclge_tqp, q);
574                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
575                 hclge_cmd_setup_basic_desc(&desc[0],
576                                            HCLGE_OPC_QUERY_TX_STATS,
577                                            true);
578
579                 desc[0].data[0] = cpu_to_le32(tqp->index);
580                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
581                 if (ret) {
582                         dev_err(&hdev->pdev->dev,
583                                 "Query tqp stat fail, status = %d, queue = %d\n",
584                                 ret, i);
585                         return ret;
586                 }
587                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
588                         le32_to_cpu(desc[0].data[1]);
589         }
590
591         return 0;
592 }
593
594 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
595 {
596         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
597         struct hclge_tqp *tqp;
598         u64 *buff = data;
599         int i;
600
601         for (i = 0; i < kinfo->num_tqps; i++) {
602                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
603                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
604         }
605
606         for (i = 0; i < kinfo->num_tqps; i++) {
607                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
608                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
609         }
610
611         return buff;
612 }
613
614 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
615 {
616         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
617
618         /* each tqp has TX and RX queues */
619         return kinfo->num_tqps * 2;
620 }
621
622 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
623 {
624         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625         u8 *buff = data;
626         int i;
627
628         for (i = 0; i < kinfo->num_tqps; i++) {
629                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
630                         struct hclge_tqp, q);
631                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
632                          tqp->index);
633                 buff = buff + ETH_GSTRING_LEN;
634         }
635
636         for (i = 0; i < kinfo->num_tqps; i++) {
637                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
638                         struct hclge_tqp, q);
639                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
640                          tqp->index);
641                 buff = buff + ETH_GSTRING_LEN;
642         }
643
644         return buff;
645 }
646
647 static u64 *hclge_comm_get_stats(const void *comm_stats,
648                                  const struct hclge_comm_stats_str strs[],
649                                  int size, u64 *data)
650 {
651         u64 *buf = data;
652         u32 i;
653
654         for (i = 0; i < size; i++)
655                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
656
657         return buf + size;
658 }
659
660 static u8 *hclge_comm_get_strings(u32 stringset,
661                                   const struct hclge_comm_stats_str strs[],
662                                   int size, u8 *data)
663 {
664         char *buff = (char *)data;
665         u32 i;
666
667         if (stringset != ETH_SS_STATS)
668                 return buff;
669
670         for (i = 0; i < size; i++) {
671                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
672                 buff = buff + ETH_GSTRING_LEN;
673         }
674
675         return (u8 *)buff;
676 }
677
678 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
679 {
680         struct hnae3_handle *handle;
681         int status;
682
683         handle = &hdev->vport[0].nic;
684         if (handle->client) {
685                 status = hclge_tqps_update_stats(handle);
686                 if (status) {
687                         dev_err(&hdev->pdev->dev,
688                                 "Update TQPS stats fail, status = %d.\n",
689                                 status);
690                 }
691         }
692
693         status = hclge_mac_update_stats(hdev);
694         if (status)
695                 dev_err(&hdev->pdev->dev,
696                         "Update MAC stats fail, status = %d.\n", status);
697 }
698
699 static void hclge_update_stats(struct hnae3_handle *handle,
700                                struct net_device_stats *net_stats)
701 {
702         struct hclge_vport *vport = hclge_get_vport(handle);
703         struct hclge_dev *hdev = vport->back;
704         int status;
705
706         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
707                 return;
708
709         status = hclge_mac_update_stats(hdev);
710         if (status)
711                 dev_err(&hdev->pdev->dev,
712                         "Update MAC stats fail, status = %d.\n",
713                         status);
714
715         status = hclge_tqps_update_stats(handle);
716         if (status)
717                 dev_err(&hdev->pdev->dev,
718                         "Update TQPS stats fail, status = %d.\n",
719                         status);
720
721         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
722 }
723
724 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
725 {
726 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
727                 HNAE3_SUPPORT_PHY_LOOPBACK |\
728                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
729                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
730
731         struct hclge_vport *vport = hclge_get_vport(handle);
732         struct hclge_dev *hdev = vport->back;
733         int count = 0;
734
735         /* Loopback test support rules:
736          * mac: only GE mode is supported
737          * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
738          * phy: only supported when a phy device exists on the board
739          */
740         if (stringset == ETH_SS_TEST) {
741                 /* clear loopback bit flags first */
742                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
743                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
744                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
745                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
746                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
747                         count += 1;
748                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749                 }
750
751                 count += 2;
752                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
753                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
754
755                 if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
756                     hdev->hw.mac.phydev->drv->set_loopback) {
757                         count += 1;
758                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
759                 }
760
761         } else if (stringset == ETH_SS_STATS) {
762                 count = ARRAY_SIZE(g_mac_stats_string) +
763                         hclge_tqps_get_sset_count(handle, stringset);
764         }
765
766         return count;
767 }
768
769 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
770                               u8 *data)
771 {
772         u8 *p = data;
773         int size;
774
775         if (stringset == ETH_SS_STATS) {
776                 size = ARRAY_SIZE(g_mac_stats_string);
777                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
778                                            size, p);
779                 p = hclge_tqps_get_strings(handle, p);
780         } else if (stringset == ETH_SS_TEST) {
781                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
782                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
783                                ETH_GSTRING_LEN);
784                         p += ETH_GSTRING_LEN;
785                 }
786                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
787                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
788                                ETH_GSTRING_LEN);
789                         p += ETH_GSTRING_LEN;
790                 }
791                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
792                         memcpy(p,
793                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
794                                ETH_GSTRING_LEN);
795                         p += ETH_GSTRING_LEN;
796                 }
797                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
798                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
799                                ETH_GSTRING_LEN);
800                         p += ETH_GSTRING_LEN;
801                 }
802         }
803 }
804
805 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
806 {
807         struct hclge_vport *vport = hclge_get_vport(handle);
808         struct hclge_dev *hdev = vport->back;
809         u64 *p;
810
811         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
812                                  ARRAY_SIZE(g_mac_stats_string), data);
813         p = hclge_tqps_get_stats(handle, p);
814 }
815
816 static void hclge_get_mac_stat(struct hnae3_handle *handle,
817                                struct hns3_mac_stats *mac_stats)
818 {
819         struct hclge_vport *vport = hclge_get_vport(handle);
820         struct hclge_dev *hdev = vport->back;
821
822         hclge_update_stats(handle, NULL);
823
824         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
825         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
826 }
827
828 static int hclge_parse_func_status(struct hclge_dev *hdev,
829                                    struct hclge_func_status_cmd *status)
830 {
831 #define HCLGE_MAC_ID_MASK       0xF
832
833         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
834                 return -EINVAL;
835
836         /* Set the pf to main pf */
837         if (status->pf_state & HCLGE_PF_STATE_MAIN)
838                 hdev->flag |= HCLGE_FLAG_MAIN;
839         else
840                 hdev->flag &= ~HCLGE_FLAG_MAIN;
841
842         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
843         return 0;
844 }
845
846 static int hclge_query_function_status(struct hclge_dev *hdev)
847 {
848 #define HCLGE_QUERY_MAX_CNT     5
849
850         struct hclge_func_status_cmd *req;
851         struct hclge_desc desc;
852         int timeout = 0;
853         int ret;
854
855         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
856         req = (struct hclge_func_status_cmd *)desc.data;
857
858         do {
859                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
860                 if (ret) {
861                         dev_err(&hdev->pdev->dev,
862                                 "query function status failed %d.\n", ret);
863                         return ret;
864                 }
865
866                 /* Check if pf reset is done */
867                 if (req->pf_state)
868                         break;
869                 usleep_range(1000, 2000);
870         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
871
872         return hclge_parse_func_status(hdev, req);
873 }
874
875 static int hclge_query_pf_resource(struct hclge_dev *hdev)
876 {
877         struct hclge_pf_res_cmd *req;
878         struct hclge_desc desc;
879         int ret;
880
881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
882         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
883         if (ret) {
884                 dev_err(&hdev->pdev->dev,
885                         "query pf resource failed %d.\n", ret);
886                 return ret;
887         }
888
889         req = (struct hclge_pf_res_cmd *)desc.data;
890         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
891                          le16_to_cpu(req->ext_tqp_num);
892         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
893
894         if (req->tx_buf_size)
895                 hdev->tx_buf_size =
896                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
897         else
898                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
899
900         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
901
902         if (req->dv_buf_size)
903                 hdev->dv_buf_size =
904                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
905         else
906                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
907
908         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
909
910         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
911         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
912                 dev_err(&hdev->pdev->dev,
913                         "only %u msi resources available, not enough for pf(min:2).\n",
914                         hdev->num_nic_msi);
915                 return -EINVAL;
916         }
917
918         if (hnae3_dev_roce_supported(hdev)) {
919                 hdev->num_roce_msi =
920                         le16_to_cpu(req->pf_intr_vector_number_roce);
921
922                 /* PF should have NIC vectors and Roce vectors,
923                  * NIC vectors are queued before Roce vectors.
924                  */
925                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
926         } else {
927                 hdev->num_msi = hdev->num_nic_msi;
928         }
929
930         return 0;
931 }
932
933 static int hclge_parse_speed(int speed_cmd, int *speed)
934 {
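            /* map the raw speed code into the driver's HCLGE_MAC_SPEED_* value */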
935         switch (speed_cmd) {
936         case 6:
937                 *speed = HCLGE_MAC_SPEED_10M;
938                 break;
939         case 7:
940                 *speed = HCLGE_MAC_SPEED_100M;
941                 break;
942         case 0:
943                 *speed = HCLGE_MAC_SPEED_1G;
944                 break;
945         case 1:
946                 *speed = HCLGE_MAC_SPEED_10G;
947                 break;
948         case 2:
949                 *speed = HCLGE_MAC_SPEED_25G;
950                 break;
951         case 3:
952                 *speed = HCLGE_MAC_SPEED_40G;
953                 break;
954         case 4:
955                 *speed = HCLGE_MAC_SPEED_50G;
956                 break;
957         case 5:
958                 *speed = HCLGE_MAC_SPEED_100G;
959                 break;
960         case 8:
961                 *speed = HCLGE_MAC_SPEED_200G;
962                 break;
963         default:
964                 return -EINVAL;
965         }
966
967         return 0;
968 }
969
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972         struct hclge_vport *vport = hclge_get_vport(handle);
973         struct hclge_dev *hdev = vport->back;
974         u32 speed_ability = hdev->hw.mac.speed_ability;
975         u32 speed_bit = 0;
976
977         switch (speed) {
978         case HCLGE_MAC_SPEED_10M:
979                 speed_bit = HCLGE_SUPPORT_10M_BIT;
980                 break;
981         case HCLGE_MAC_SPEED_100M:
982                 speed_bit = HCLGE_SUPPORT_100M_BIT;
983                 break;
984         case HCLGE_MAC_SPEED_1G:
985                 speed_bit = HCLGE_SUPPORT_1G_BIT;
986                 break;
987         case HCLGE_MAC_SPEED_10G:
988                 speed_bit = HCLGE_SUPPORT_10G_BIT;
989                 break;
990         case HCLGE_MAC_SPEED_25G:
991                 speed_bit = HCLGE_SUPPORT_25G_BIT;
992                 break;
993         case HCLGE_MAC_SPEED_40G:
994                 speed_bit = HCLGE_SUPPORT_40G_BIT;
995                 break;
996         case HCLGE_MAC_SPEED_50G:
997                 speed_bit = HCLGE_SUPPORT_50G_BIT;
998                 break;
999         case HCLGE_MAC_SPEED_100G:
1000                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1001                 break;
1002         case HCLGE_MAC_SPEED_200G:
1003                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1004                 break;
1005         default:
1006                 return -EINVAL;
1007         }
1008
1009         if (speed_bit & speed_ability)
1010                 return 0;
1011
1012         return -EINVAL;
1013 }
1014
1015 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1016 {
1017         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1018                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1019                                  mac->supported);
1020         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1021                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1022                                  mac->supported);
1023         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1024                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1025                                  mac->supported);
1026         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1027                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1028                                  mac->supported);
1029         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1030                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1031                                  mac->supported);
1032         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1033                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1034                                  mac->supported);
1035 }
1036
1037 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1038 {
1039         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1040                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1041                                  mac->supported);
1042         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1047                                  mac->supported);
1048         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1055                 linkmode_set_bit(
1056                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1057                         mac->supported);
1058 }
1059
1060 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1061 {
1062         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1063                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1064                                  mac->supported);
1065         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1066                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1067                                  mac->supported);
1068         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1069                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1070                                  mac->supported);
1071         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1072                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1073                                  mac->supported);
1074         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1075                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1076                                  mac->supported);
1077         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1078                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1079                                  mac->supported);
1080 }
1081
1082 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1083 {
1084         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1085                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1086                                  mac->supported);
1087         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1089                                  mac->supported);
1090         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1092                                  mac->supported);
1093         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1104                                  mac->supported);
1105 }
1106
1107 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1108 {
1109         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1110         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1111
1112         switch (mac->speed) {
1113         case HCLGE_MAC_SPEED_10G:
1114         case HCLGE_MAC_SPEED_40G:
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1116                                  mac->supported);
1117                 mac->fec_ability =
1118                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1119                 break;
1120         case HCLGE_MAC_SPEED_25G:
1121         case HCLGE_MAC_SPEED_50G:
1122                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1123                                  mac->supported);
1124                 mac->fec_ability =
1125                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1126                         BIT(HNAE3_FEC_AUTO);
1127                 break;
1128         case HCLGE_MAC_SPEED_100G:
1129         case HCLGE_MAC_SPEED_200G:
1130                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1131                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1132                 break;
1133         default:
1134                 mac->fec_ability = 0;
1135                 break;
1136         }
1137 }
1138
1139 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1140                                         u16 speed_ability)
1141 {
1142         struct hclge_mac *mac = &hdev->hw.mac;
1143
1144         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1145                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1146                                  mac->supported);
1147
1148         hclge_convert_setting_sr(mac, speed_ability);
1149         hclge_convert_setting_lr(mac, speed_ability);
1150         hclge_convert_setting_cr(mac, speed_ability);
1151         if (hnae3_dev_fec_supported(hdev))
1152                 hclge_convert_setting_fec(mac);
1153
1154         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1155         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1156         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1157 }
1158
1159 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1160                                             u16 speed_ability)
1161 {
1162         struct hclge_mac *mac = &hdev->hw.mac;
1163
1164         hclge_convert_setting_kr(mac, speed_ability);
1165         if (hnae3_dev_fec_supported(hdev))
1166                 hclge_convert_setting_fec(mac);
1167         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1168         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1169         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1170 }
1171
1172 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1173                                          u16 speed_ability)
1174 {
1175         unsigned long *supported = hdev->hw.mac.supported;
1176
1177         /* default to support all speed for GE port */
1178         if (!speed_ability)
1179                 speed_ability = HCLGE_SUPPORT_GE;
1180
1181         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1182                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1183                                  supported);
1184
1185         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1186                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1187                                  supported);
1188                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1189                                  supported);
1190         }
1191
1192         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1193                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1194                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1195         }
1196
1197         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1198         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1199         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1200         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1201 }
1202
1203 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1204 {
1205         u8 media_type = hdev->hw.mac.media_type;
1206
1207         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1208                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1209         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1210                 hclge_parse_copper_link_mode(hdev, speed_ability);
1211         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1212                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1213 }
1214
1215 static u32 hclge_get_max_speed(u16 speed_ability)
1216 {
1217         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1218                 return HCLGE_MAC_SPEED_200G;
1219
1220         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1221                 return HCLGE_MAC_SPEED_100G;
1222
1223         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1224                 return HCLGE_MAC_SPEED_50G;
1225
1226         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1227                 return HCLGE_MAC_SPEED_40G;
1228
1229         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1230                 return HCLGE_MAC_SPEED_25G;
1231
1232         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1233                 return HCLGE_MAC_SPEED_10G;
1234
1235         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1236                 return HCLGE_MAC_SPEED_1G;
1237
1238         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1239                 return HCLGE_MAC_SPEED_100M;
1240
1241         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1242                 return HCLGE_MAC_SPEED_10M;
1243
1244         return HCLGE_MAC_SPEED_1G;
1245 }
1246
1247 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1248 {
1249 #define SPEED_ABILITY_EXT_SHIFT                 8
1250
1251         struct hclge_cfg_param_cmd *req;
1252         u64 mac_addr_tmp_high;
1253         u16 speed_ability_ext;
1254         u64 mac_addr_tmp;
1255         unsigned int i;
1256
1257         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1258
1259         /* get the configuration */
1260         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1261                                               HCLGE_CFG_VMDQ_M,
1262                                               HCLGE_CFG_VMDQ_S);
1263         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1264                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1265         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1266                                             HCLGE_CFG_TQP_DESC_N_M,
1267                                             HCLGE_CFG_TQP_DESC_N_S);
1268
1269         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1270                                         HCLGE_CFG_PHY_ADDR_M,
1271                                         HCLGE_CFG_PHY_ADDR_S);
1272         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1273                                           HCLGE_CFG_MEDIA_TP_M,
1274                                           HCLGE_CFG_MEDIA_TP_S);
1275         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1276                                           HCLGE_CFG_RX_BUF_LEN_M,
1277                                           HCLGE_CFG_RX_BUF_LEN_S);
1278         /* get mac_address */
1279         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1280         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1281                                             HCLGE_CFG_MAC_ADDR_H_M,
1282                                             HCLGE_CFG_MAC_ADDR_H_S);
1283
1284         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1285
1286         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1287                                              HCLGE_CFG_DEFAULT_SPEED_M,
1288                                              HCLGE_CFG_DEFAULT_SPEED_S);
1289         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1290                                                HCLGE_CFG_RSS_SIZE_M,
1291                                                HCLGE_CFG_RSS_SIZE_S);
1292
1293         for (i = 0; i < ETH_ALEN; i++)
1294                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1295
1296         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1297         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1298
1299         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300                                              HCLGE_CFG_SPEED_ABILITY_M,
1301                                              HCLGE_CFG_SPEED_ABILITY_S);
1302         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1304                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1305         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1306
1307         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1308                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1309                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1310         if (!cfg->umv_space)
1311                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1312
1313         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1314                                                HCLGE_CFG_PF_RSS_SIZE_M,
1315                                                HCLGE_CFG_PF_RSS_SIZE_S);
1316
1317         /* HCLGE_CFG_PF_RSS_SIZE_M holds the exponent of the PF max rss
1318          * size (a power of 2) rather than the value itself, which is
1319          * more flexible for future changes and expansions.
1320          * A PF field of 0 does not make sense; in that case the PF
1321          * falls back to the VF max rss size field, HCLGE_CFG_RSS_SIZE_S,
1322          * so PF and VF share the same max rss size.
1323          */
1324         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1325                                1U << cfg->pf_rss_size_max :
1326                                cfg->vf_rss_size_max;
1327 }
1328
1329 /* hclge_get_cfg: query the static parameters from flash
1330  * @hdev: pointer to struct hclge_dev
1331  * @hcfg: the config structure to be filled
1332  */
1333 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1334 {
1335         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1336         struct hclge_cfg_param_cmd *req;
1337         unsigned int i;
1338         int ret;
1339
1340         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1341                 u32 offset = 0;
1342
1343                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1344                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1345                                            true);
1346                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1347                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1348                 /* Len must be in units of 4 bytes when sent to hardware */
1349                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1350                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1351                 req->offset = cpu_to_le32(offset);
1352         }
1353
1354         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1355         if (ret) {
1356                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1357                 return ret;
1358         }
1359
1360         hclge_parse_cfg(hcfg, desc);
1361
1362         return 0;
1363 }
1364
1365 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1366 {
1367 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1368
1369         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1370
1371         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1372         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1373         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1374         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1375         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1376 }
1377
1378 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1379                                   struct hclge_desc *desc)
1380 {
1381         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1382         struct hclge_dev_specs_0_cmd *req0;
1383         struct hclge_dev_specs_1_cmd *req1;
1384
1385         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1386         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1387
1388         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1389         ae_dev->dev_specs.rss_ind_tbl_size =
1390                 le16_to_cpu(req0->rss_ind_tbl_size);
1391         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1392         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1393         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1394         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1395 }
1396
1397 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1398 {
1399         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1400
1401         if (!dev_specs->max_non_tso_bd_num)
1402                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1403         if (!dev_specs->rss_ind_tbl_size)
1404                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1405         if (!dev_specs->rss_key_size)
1406                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1407         if (!dev_specs->max_tm_rate)
1408                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1409         if (!dev_specs->max_int_gl)
1410                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1411 }
1412
1413 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1414 {
1415         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1416         int ret;
1417         int i;
1418
1419         /* set default specifications as devices lower than version V3 do not
1420          * support querying specifications from firmware.
1421          */
1422         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1423                 hclge_set_default_dev_specs(hdev);
1424                 return 0;
1425         }
1426
1427         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1428                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1429                                            true);
1430                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1431         }
1432         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1433
1434         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1435         if (ret)
1436                 return ret;
1437
1438         hclge_parse_dev_specs(hdev, desc);
1439         hclge_check_dev_specs(hdev);
1440
1441         return 0;
1442 }
1443
1444 static int hclge_get_cap(struct hclge_dev *hdev)
1445 {
1446         int ret;
1447
1448         ret = hclge_query_function_status(hdev);
1449         if (ret) {
1450                 dev_err(&hdev->pdev->dev,
1451                         "query function status error %d.\n", ret);
1452                 return ret;
1453         }
1454
1455         /* get pf resource */
1456         return hclge_query_pf_resource(hdev);
1457 }
1458
1459 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1460 {
1461 #define HCLGE_MIN_TX_DESC       64
1462 #define HCLGE_MIN_RX_DESC       64
1463
1464         if (!is_kdump_kernel())
1465                 return;
1466
1467         dev_info(&hdev->pdev->dev,
1468                  "Running kdump kernel. Using minimal resources\n");
1469
1470         /* minimal queue pairs equal the number of vports */
1471         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1472         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1473         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1474 }
1475
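/* Read the static configuration from firmware and use it to initialise the
 * corresponding fields of struct hclge_dev (MAC, queue/TC limits, RSS sizes,
 * affinity mask, etc.).
 */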
1476 static int hclge_configure(struct hclge_dev *hdev)
1477 {
1478         struct hclge_cfg cfg;
1479         unsigned int i;
1480         int ret;
1481
1482         ret = hclge_get_cfg(hdev, &cfg);
1483         if (ret)
1484                 return ret;
1485
1486         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1487         hdev->base_tqp_pid = 0;
1488         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1489         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1490         hdev->rx_buf_len = cfg.rx_buf_len;
1491         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1492         hdev->hw.mac.media_type = cfg.media_type;
1493         hdev->hw.mac.phy_addr = cfg.phy_addr;
1494         hdev->num_tx_desc = cfg.tqp_desc_num;
1495         hdev->num_rx_desc = cfg.tqp_desc_num;
1496         hdev->tm_info.num_pg = 1;
1497         hdev->tc_max = cfg.tc_num;
1498         hdev->tm_info.hw_pfc_map = 0;
1499         hdev->wanted_umv_size = cfg.umv_space;
1500
1501         if (hnae3_dev_fd_supported(hdev)) {
1502                 hdev->fd_en = true;
1503                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1504         }
1505
1506         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1507         if (ret) {
1508                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1509                         cfg.default_speed, ret);
1510                 return ret;
1511         }
1512
1513         hclge_parse_link_mode(hdev, cfg.speed_ability);
1514
1515         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1516
1517         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1518             (hdev->tc_max < 1)) {
1519                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1520                          hdev->tc_max);
1521                 hdev->tc_max = 1;
1522         }
1523
1524         /* Dev does not support DCB */
1525         if (!hnae3_dev_dcb_supported(hdev)) {
1526                 hdev->tc_max = 1;
1527                 hdev->pfc_max = 0;
1528         } else {
1529                 hdev->pfc_max = hdev->tc_max;
1530         }
1531
1532         hdev->tm_info.num_tc = 1;
1533
1534         /* Non-contiguous TCs are not currently supported */
1535         for (i = 0; i < hdev->tm_info.num_tc; i++)
1536                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1537
1538         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1539
1540         hclge_init_kdump_kernel_config(hdev);
1541
1542         /* Set the initial affinity based on the PCI function number */
1543         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1544         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1545         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1546                         &hdev->affinity_mask);
1547
1548         return ret;
1549 }
1550
1551 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1552                             u16 tso_mss_max)
1553 {
1554         struct hclge_cfg_tso_status_cmd *req;
1555         struct hclge_desc desc;
1556
1557         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1558
1559         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1560         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1561         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1562
1563         return hclge_cmd_send(&hdev->hw, &desc, 1);
1564 }
1565
1566 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1567 {
1568         struct hclge_cfg_gro_status_cmd *req;
1569         struct hclge_desc desc;
1570         int ret;
1571
1572         if (!hnae3_dev_gro_supported(hdev))
1573                 return 0;
1574
1575         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1576         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1577
1578         req->gro_en = en ? 1 : 0;
1579
1580         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1581         if (ret)
1582                 dev_err(&hdev->pdev->dev,
1583                         "GRO hardware config cmd failed, ret = %d\n", ret);
1584
1585         return ret;
1586 }
1587
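/* Allocate the per-device TQP array and set up each queue's descriptor
 * numbers, buffer size and io_base; queues beyond HCLGE_TQP_MAX_SIZE_DEV_V2
 * use the extended register offset.
 */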
1588 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1589 {
1590         struct hclge_tqp *tqp;
1591         int i;
1592
1593         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1594                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1595         if (!hdev->htqp)
1596                 return -ENOMEM;
1597
1598         tqp = hdev->htqp;
1599
1600         for (i = 0; i < hdev->num_tqps; i++) {
1601                 tqp->dev = &hdev->pdev->dev;
1602                 tqp->index = i;
1603
1604                 tqp->q.ae_algo = &ae_algo;
1605                 tqp->q.buf_size = hdev->rx_buf_len;
1606                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1607                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1608
1609                 /* need an extended offset to configure queues >=
1610                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1611                  */
1612                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1613                         tqp->q.io_base = hdev->hw.io_base +
1614                                          HCLGE_TQP_REG_OFFSET +
1615                                          i * HCLGE_TQP_REG_SIZE;
1616                 else
1617                         tqp->q.io_base = hdev->hw.io_base +
1618                                          HCLGE_TQP_REG_OFFSET +
1619                                          HCLGE_TQP_EXT_REG_OFFSET +
1620                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1621                                          HCLGE_TQP_REG_SIZE;
1622
1623                 tqp++;
1624         }
1625
1626         return 0;
1627 }
1628
1629 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1630                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1631 {
1632         struct hclge_tqp_map_cmd *req;
1633         struct hclge_desc desc;
1634         int ret;
1635
1636         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1637
1638         req = (struct hclge_tqp_map_cmd *)desc.data;
1639         req->tqp_id = cpu_to_le16(tqp_pid);
1640         req->tqp_vf = func_id;
1641         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1642         if (!is_pf)
1643                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1644         req->tqp_vid = cpu_to_le16(tqp_vid);
1645
1646         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1647         if (ret)
1648                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1649
1650         return ret;
1651 }
1652
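/* Hand out up to num_tqps unallocated hardware queues to the vport and
 * derive its rss_size from the allocated queue count, the TC number and the
 * available NIC MSI vectors.
 */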
1653 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1654 {
1655         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1656         struct hclge_dev *hdev = vport->back;
1657         int i, alloced;
1658
1659         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1660              alloced < num_tqps; i++) {
1661                 if (!hdev->htqp[i].alloced) {
1662                         hdev->htqp[i].q.handle = &vport->nic;
1663                         hdev->htqp[i].q.tqp_index = alloced;
1664                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1665                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1666                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1667                         hdev->htqp[i].alloced = true;
1668                         alloced++;
1669                 }
1670         }
1671         vport->alloc_tqps = alloced;
1672         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1673                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1674
1675         /* ensure one-to-one mapping between irq and queue by default */
1676         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1677                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1678
1679         return 0;
1680 }
1681
1682 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1683                             u16 num_tx_desc, u16 num_rx_desc)
1684
1685 {
1686         struct hnae3_handle *nic = &vport->nic;
1687         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1688         struct hclge_dev *hdev = vport->back;
1689         int ret;
1690
1691         kinfo->num_tx_desc = num_tx_desc;
1692         kinfo->num_rx_desc = num_rx_desc;
1693
1694         kinfo->rx_buf_len = hdev->rx_buf_len;
1695
1696         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1697                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1698         if (!kinfo->tqp)
1699                 return -ENOMEM;
1700
1701         ret = hclge_assign_tqp(vport, num_tqps);
1702         if (ret)
1703                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1704
1705         return ret;
1706 }
1707
1708 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1709                                   struct hclge_vport *vport)
1710 {
1711         struct hnae3_handle *nic = &vport->nic;
1712         struct hnae3_knic_private_info *kinfo;
1713         u16 i;
1714
1715         kinfo = &nic->kinfo;
1716         for (i = 0; i < vport->alloc_tqps; i++) {
1717                 struct hclge_tqp *q =
1718                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1719                 bool is_pf;
1720                 int ret;
1721
1722                 is_pf = !(vport->vport_id);
1723                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1724                                              i, is_pf);
1725                 if (ret)
1726                         return ret;
1727         }
1728
1729         return 0;
1730 }
1731
1732 static int hclge_map_tqp(struct hclge_dev *hdev)
1733 {
1734         struct hclge_vport *vport = hdev->vport;
1735         u16 i, num_vport;
1736
1737         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1738         for (i = 0; i < num_vport; i++) {
1739                 int ret;
1740
1741                 ret = hclge_map_tqp_to_vport(hdev, vport);
1742                 if (ret)
1743                         return ret;
1744
1745                 vport++;
1746         }
1747
1748         return 0;
1749 }
1750
1751 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1752 {
1753         struct hnae3_handle *nic = &vport->nic;
1754         struct hclge_dev *hdev = vport->back;
1755         int ret;
1756
1757         nic->pdev = hdev->pdev;
1758         nic->ae_algo = &ae_algo;
1759         nic->numa_node_mask = hdev->numa_node_mask;
1760
1761         ret = hclge_knic_setup(vport, num_tqps,
1762                                hdev->num_tx_desc, hdev->num_rx_desc);
1763         if (ret)
1764                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1765
1766         return ret;
1767 }
1768
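/* Allocate one vport per function (PF, VFs and VMDq vports) and distribute
 * the TQPs evenly between them; the main (PF) vport also takes any
 * remainder.
 */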
1769 static int hclge_alloc_vport(struct hclge_dev *hdev)
1770 {
1771         struct pci_dev *pdev = hdev->pdev;
1772         struct hclge_vport *vport;
1773         u32 tqp_main_vport;
1774         u32 tqp_per_vport;
1775         int num_vport, i;
1776         int ret;
1777
1778         /* We need to alloc a vport for the main NIC of the PF */
1779         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1780
1781         if (hdev->num_tqps < num_vport) {
1782                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1783                         hdev->num_tqps, num_vport);
1784                 return -EINVAL;
1785         }
1786
1787         /* Alloc the same number of TQPs for every vport */
1788         tqp_per_vport = hdev->num_tqps / num_vport;
1789         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1790
1791         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1792                              GFP_KERNEL);
1793         if (!vport)
1794                 return -ENOMEM;
1795
1796         hdev->vport = vport;
1797         hdev->num_alloc_vport = num_vport;
1798
1799         if (IS_ENABLED(CONFIG_PCI_IOV))
1800                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1801
1802         for (i = 0; i < num_vport; i++) {
1803                 vport->back = hdev;
1804                 vport->vport_id = i;
1805                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1806                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1807                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1808                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1809                 INIT_LIST_HEAD(&vport->vlan_list);
1810                 INIT_LIST_HEAD(&vport->uc_mac_list);
1811                 INIT_LIST_HEAD(&vport->mc_mac_list);
1812                 spin_lock_init(&vport->mac_list_lock);
1813
1814                 if (i == 0)
1815                         ret = hclge_vport_setup(vport, tqp_main_vport);
1816                 else
1817                         ret = hclge_vport_setup(vport, tqp_per_vport);
1818                 if (ret) {
1819                         dev_err(&pdev->dev,
1820                                 "vport setup failed for vport %d, %d\n",
1821                                 i, ret);
1822                         return ret;
1823                 }
1824
1825                 vport++;
1826         }
1827
1828         return 0;
1829 }
1830
1831 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1832                                     struct hclge_pkt_buf_alloc *buf_alloc)
1833 {
1834 /* TX buffer size is in units of 128 bytes */
1835 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1836 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1837         struct hclge_tx_buff_alloc_cmd *req;
1838         struct hclge_desc desc;
1839         int ret;
1840         u8 i;
1841
1842         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1843
1844         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1845         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1846                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1847
1848                 req->tx_pkt_buff[i] =
1849                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1850                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1851         }
1852
1853         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1854         if (ret)
1855                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1856                         ret);
1857
1858         return ret;
1859 }
1860
1861 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1862                                  struct hclge_pkt_buf_alloc *buf_alloc)
1863 {
1864         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1865
1866         if (ret)
1867                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1868
1869         return ret;
1870 }
1871
1872 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1873 {
1874         unsigned int i;
1875         u32 cnt = 0;
1876
1877         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1878                 if (hdev->hw_tc_map & BIT(i))
1879                         cnt++;
1880         return cnt;
1881 }
1882
1883 /* Get the number of PFC-enabled TCs that have a private buffer */
1884 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1885                                   struct hclge_pkt_buf_alloc *buf_alloc)
1886 {
1887         struct hclge_priv_buf *priv;
1888         unsigned int i;
1889         int cnt = 0;
1890
1891         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1892                 priv = &buf_alloc->priv_buf[i];
1893                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1894                     priv->enable)
1895                         cnt++;
1896         }
1897
1898         return cnt;
1899 }
1900
1901 /* Get the number of PFC-disabled TCs that have a private buffer */
1902 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1903                                      struct hclge_pkt_buf_alloc *buf_alloc)
1904 {
1905         struct hclge_priv_buf *priv;
1906         unsigned int i;
1907         int cnt = 0;
1908
1909         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1910                 priv = &buf_alloc->priv_buf[i];
1911                 if (hdev->hw_tc_map & BIT(i) &&
1912                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1913                     priv->enable)
1914                         cnt++;
1915         }
1916
1917         return cnt;
1918 }
1919
1920 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1921 {
1922         struct hclge_priv_buf *priv;
1923         u32 rx_priv = 0;
1924         int i;
1925
1926         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1927                 priv = &buf_alloc->priv_buf[i];
1928                 if (priv->enable)
1929                         rx_priv += priv->buf_size;
1930         }
1931         return rx_priv;
1932 }
1933
1934 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1935 {
1936         u32 i, total_tx_size = 0;
1937
1938         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1939                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1940
1941         return total_tx_size;
1942 }
1943
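/* Check whether the rx buffer left after the private allocations can hold
 * the required shared buffer; if so, record the shared buffer size and its
 * per-TC and self high/low waterline thresholds.
 */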
1944 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1945                                 struct hclge_pkt_buf_alloc *buf_alloc,
1946                                 u32 rx_all)
1947 {
1948         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1949         u32 tc_num = hclge_get_tc_num(hdev);
1950         u32 shared_buf, aligned_mps;
1951         u32 rx_priv;
1952         int i;
1953
1954         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1955
1956         if (hnae3_dev_dcb_supported(hdev))
1957                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1958                                         hdev->dv_buf_size;
1959         else
1960                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1961                                         + hdev->dv_buf_size;
1962
1963         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1964         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1965                              HCLGE_BUF_SIZE_UNIT);
1966
1967         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1968         if (rx_all < rx_priv + shared_std)
1969                 return false;
1970
1971         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1972         buf_alloc->s_buf.buf_size = shared_buf;
1973         if (hnae3_dev_dcb_supported(hdev)) {
1974                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1975                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1976                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1977                                   HCLGE_BUF_SIZE_UNIT);
1978         } else {
1979                 buf_alloc->s_buf.self.high = aligned_mps +
1980                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1981                 buf_alloc->s_buf.self.low = aligned_mps;
1982         }
1983
1984         if (hnae3_dev_dcb_supported(hdev)) {
1985                 hi_thrd = shared_buf - hdev->dv_buf_size;
1986
1987                 if (tc_num <= NEED_RESERVE_TC_NUM)
1988                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1989                                         / BUF_MAX_PERCENT;
1990
1991                 if (tc_num)
1992                         hi_thrd = hi_thrd / tc_num;
1993
1994                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1995                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1996                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1997         } else {
1998                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1999                 lo_thrd = aligned_mps;
2000         }
2001
2002         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2003                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2004                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2005         }
2006
2007         return true;
2008 }
2009
2010 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2011                                 struct hclge_pkt_buf_alloc *buf_alloc)
2012 {
2013         u32 i, total_size;
2014
2015         total_size = hdev->pkt_buf_size;
2016
2017         /* alloc tx buffer for all enabled tc */
2018         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2019                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2020
2021                 if (hdev->hw_tc_map & BIT(i)) {
2022                         if (total_size < hdev->tx_buf_size)
2023                                 return -ENOMEM;
2024
2025                         priv->tx_buf_size = hdev->tx_buf_size;
2026                 } else {
2027                         priv->tx_buf_size = 0;
2028                 }
2029
2030                 total_size -= priv->tx_buf_size;
2031         }
2032
2033         return 0;
2034 }
2035
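/* Calculate rx private buffer sizes and waterlines for every enabled TC,
 * using larger or smaller waterlines depending on @max, then verify the
 * result still leaves room for the shared buffer.
 */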
2036 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2037                                   struct hclge_pkt_buf_alloc *buf_alloc)
2038 {
2039         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2040         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2041         unsigned int i;
2042
2043         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2044                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2045
2046                 priv->enable = 0;
2047                 priv->wl.low = 0;
2048                 priv->wl.high = 0;
2049                 priv->buf_size = 0;
2050
2051                 if (!(hdev->hw_tc_map & BIT(i)))
2052                         continue;
2053
2054                 priv->enable = 1;
2055
2056                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2057                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2058                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2059                                                 HCLGE_BUF_SIZE_UNIT);
2060                 } else {
2061                         priv->wl.low = 0;
2062                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2063                                         aligned_mps;
2064                 }
2065
2066                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2067         }
2068
2069         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2070 }
2071
2072 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2073                                           struct hclge_pkt_buf_alloc *buf_alloc)
2074 {
2075         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2076         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2077         int i;
2078
2079         /* clear from the last TC first */
2080         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2081                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2082                 unsigned int mask = BIT((unsigned int)i);
2083
2084                 if (hdev->hw_tc_map & mask &&
2085                     !(hdev->tm_info.hw_pfc_map & mask)) {
2086                         /* Clear the no pfc TC private buffer */
2087                         priv->wl.low = 0;
2088                         priv->wl.high = 0;
2089                         priv->buf_size = 0;
2090                         priv->enable = 0;
2091                         no_pfc_priv_num--;
2092                 }
2093
2094                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2095                     no_pfc_priv_num == 0)
2096                         break;
2097         }
2098
2099         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2100 }
2101
2102 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2103                                         struct hclge_pkt_buf_alloc *buf_alloc)
2104 {
2105         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2106         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2107         int i;
2108
2109         /* clear from the last TC first */
2110         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2111                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2112                 unsigned int mask = BIT((unsigned int)i);
2113
2114                 if (hdev->hw_tc_map & mask &&
2115                     hdev->tm_info.hw_pfc_map & mask) {
2116                         /* Reduce the number of pfc TC with private buffer */
2117                         priv->wl.low = 0;
2118                         priv->enable = 0;
2119                         priv->wl.high = 0;
2120                         priv->buf_size = 0;
2121                         pfc_priv_num--;
2122                 }
2123
2124                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2125                     pfc_priv_num == 0)
2126                         break;
2127         }
2128
2129         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2130 }
2131
2132 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2133                                       struct hclge_pkt_buf_alloc *buf_alloc)
2134 {
2135 #define COMPENSATE_BUFFER       0x3C00
2136 #define COMPENSATE_HALF_MPS_NUM 5
2137 #define PRIV_WL_GAP             0x1800
2138
2139         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2140         u32 tc_num = hclge_get_tc_num(hdev);
2141         u32 half_mps = hdev->mps >> 1;
2142         u32 min_rx_priv;
2143         unsigned int i;
2144
2145         if (tc_num)
2146                 rx_priv = rx_priv / tc_num;
2147
2148         if (tc_num <= NEED_RESERVE_TC_NUM)
2149                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2150
2151         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2152                         COMPENSATE_HALF_MPS_NUM * half_mps;
2153         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2154         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2155
2156         if (rx_priv < min_rx_priv)
2157                 return false;
2158
2159         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2160                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2161
2162                 priv->enable = 0;
2163                 priv->wl.low = 0;
2164                 priv->wl.high = 0;
2165                 priv->buf_size = 0;
2166
2167                 if (!(hdev->hw_tc_map & BIT(i)))
2168                         continue;
2169
2170                 priv->enable = 1;
2171                 priv->buf_size = rx_priv;
2172                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2173                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2174         }
2175
2176         buf_alloc->s_buf.buf_size = 0;
2177
2178         return true;
2179 }
2180
2181 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2182  * @hdev: pointer to struct hclge_dev
2183  * @buf_alloc: pointer to buffer calculation data
2184  * @return: 0: calculation successful, negative: fail
2185  */
2186 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2187                                 struct hclge_pkt_buf_alloc *buf_alloc)
2188 {
2189         /* When DCB is not supported, rx private buffer is not allocated. */
2190         if (!hnae3_dev_dcb_supported(hdev)) {
2191                 u32 rx_all = hdev->pkt_buf_size;
2192
2193                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2194                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2195                         return -ENOMEM;
2196
2197                 return 0;
2198         }
2199
2200         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2201                 return 0;
2202
2203         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2204                 return 0;
2205
2206         /* try to decrease the buffer size */
2207         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2208                 return 0;
2209
2210         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2211                 return 0;
2212
2213         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2214                 return 0;
2215
2216         return -ENOMEM;
2217 }
2218
2219 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2220                                    struct hclge_pkt_buf_alloc *buf_alloc)
2221 {
2222         struct hclge_rx_priv_buff_cmd *req;
2223         struct hclge_desc desc;
2224         int ret;
2225         int i;
2226
2227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2228         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2229
2230         /* Alloc private buffer TCs */
2231         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2232                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2233
2234                 req->buf_num[i] =
2235                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2236                 req->buf_num[i] |=
2237                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2238         }
2239
2240         req->shared_buf =
2241                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2242                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2243
2244         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2245         if (ret)
2246                 dev_err(&hdev->pdev->dev,
2247                         "rx private buffer alloc cmd failed %d\n", ret);
2248
2249         return ret;
2250 }
2251
2252 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2253                                    struct hclge_pkt_buf_alloc *buf_alloc)
2254 {
2255         struct hclge_rx_priv_wl_buf *req;
2256         struct hclge_priv_buf *priv;
2257         struct hclge_desc desc[2];
2258         int i, j;
2259         int ret;
2260
2261         for (i = 0; i < 2; i++) {
2262                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2263                                            false);
2264                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2265
2266                 /* The first descriptor sets the NEXT bit to 1 */
2267                 if (i == 0)
2268                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2269                 else
2270                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2271
2272                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2273                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2274
2275                         priv = &buf_alloc->priv_buf[idx];
2276                         req->tc_wl[j].high =
2277                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2278                         req->tc_wl[j].high |=
2279                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2280                         req->tc_wl[j].low =
2281                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2282                         req->tc_wl[j].low |=
2283                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2284                 }
2285         }
2286
2287         /* Send 2 descriptors at one time */
2288         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2289         if (ret)
2290                 dev_err(&hdev->pdev->dev,
2291                         "rx private waterline config cmd failed %d\n",
2292                         ret);
2293         return ret;
2294 }
2295
2296 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2297                                     struct hclge_pkt_buf_alloc *buf_alloc)
2298 {
2299         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2300         struct hclge_rx_com_thrd *req;
2301         struct hclge_desc desc[2];
2302         struct hclge_tc_thrd *tc;
2303         int i, j;
2304         int ret;
2305
2306         for (i = 0; i < 2; i++) {
2307                 hclge_cmd_setup_basic_desc(&desc[i],
2308                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2309                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2310
2311                 /* The first descriptor sets the NEXT bit to 1 */
2312                 if (i == 0)
2313                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2314                 else
2315                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2316
2317                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2318                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2319
2320                         req->com_thrd[j].high =
2321                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2322                         req->com_thrd[j].high |=
2323                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2324                         req->com_thrd[j].low =
2325                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2326                         req->com_thrd[j].low |=
2327                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2328                 }
2329         }
2330
2331         /* Send 2 descriptors at one time */
2332         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2333         if (ret)
2334                 dev_err(&hdev->pdev->dev,
2335                         "common threshold config cmd failed %d\n", ret);
2336         return ret;
2337 }
2338
2339 static int hclge_common_wl_config(struct hclge_dev *hdev,
2340                                   struct hclge_pkt_buf_alloc *buf_alloc)
2341 {
2342         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2343         struct hclge_rx_com_wl *req;
2344         struct hclge_desc desc;
2345         int ret;
2346
2347         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2348
2349         req = (struct hclge_rx_com_wl *)desc.data;
2350         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2351         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2352
2353         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2354         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2355
2356         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2357         if (ret)
2358                 dev_err(&hdev->pdev->dev,
2359                         "common waterline config cmd failed %d\n", ret);
2360
2361         return ret;
2362 }
2363
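/* Calculate and program the whole packet buffer layout: tx buffers, rx
 * private buffers, and (for DCB-capable devices) the private waterlines and
 * common thresholds, followed by the common waterline.
 */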
2364 int hclge_buffer_alloc(struct hclge_dev *hdev)
2365 {
2366         struct hclge_pkt_buf_alloc *pkt_buf;
2367         int ret;
2368
2369         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2370         if (!pkt_buf)
2371                 return -ENOMEM;
2372
2373         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2374         if (ret) {
2375                 dev_err(&hdev->pdev->dev,
2376                         "could not calc tx buffer size for all TCs %d\n", ret);
2377                 goto out;
2378         }
2379
2380         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2381         if (ret) {
2382                 dev_err(&hdev->pdev->dev,
2383                         "could not alloc tx buffers %d\n", ret);
2384                 goto out;
2385         }
2386
2387         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2388         if (ret) {
2389                 dev_err(&hdev->pdev->dev,
2390                         "could not calc rx priv buffer size for all TCs %d\n",
2391                         ret);
2392                 goto out;
2393         }
2394
2395         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2396         if (ret) {
2397                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2398                         ret);
2399                 goto out;
2400         }
2401
2402         if (hnae3_dev_dcb_supported(hdev)) {
2403                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2404                 if (ret) {
2405                         dev_err(&hdev->pdev->dev,
2406                                 "could not configure rx private waterline %d\n",
2407                                 ret);
2408                         goto out;
2409                 }
2410
2411                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2412                 if (ret) {
2413                         dev_err(&hdev->pdev->dev,
2414                                 "could not configure common threshold %d\n",
2415                                 ret);
2416                         goto out;
2417                 }
2418         }
2419
2420         ret = hclge_common_wl_config(hdev, pkt_buf);
2421         if (ret)
2422                 dev_err(&hdev->pdev->dev,
2423                         "could not configure common waterline %d\n", ret);
2424
2425 out:
2426         kfree(pkt_buf);
2427         return ret;
2428 }
2429
2430 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2431 {
2432         struct hnae3_handle *roce = &vport->roce;
2433         struct hnae3_handle *nic = &vport->nic;
2434         struct hclge_dev *hdev = vport->back;
2435
2436         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2437
2438         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2439                 return -EINVAL;
2440
2441         roce->rinfo.base_vector = hdev->roce_base_vector;
2442
2443         roce->rinfo.netdev = nic->kinfo.netdev;
2444         roce->rinfo.roce_io_base = hdev->hw.io_base;
2445         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2446
2447         roce->pdev = nic->pdev;
2448         roce->ae_algo = nic->ae_algo;
2449         roce->numa_node_mask = nic->numa_node_mask;
2450
2451         return 0;
2452 }
2453
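/* Allocate MSI/MSI-X vectors for the PF and set up the vector_status and
 * vector_irq bookkeeping arrays; the RoCE vectors start right after the NIC
 * vectors.
 */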
2454 static int hclge_init_msi(struct hclge_dev *hdev)
2455 {
2456         struct pci_dev *pdev = hdev->pdev;
2457         int vectors;
2458         int i;
2459
2460         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2461                                         hdev->num_msi,
2462                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2463         if (vectors < 0) {
2464                 dev_err(&pdev->dev,
2465                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2466                         vectors);
2467                 return vectors;
2468         }
2469         if (vectors < hdev->num_msi)
2470                 dev_warn(&hdev->pdev->dev,
2471                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2472                          hdev->num_msi, vectors);
2473
2474         hdev->num_msi = vectors;
2475         hdev->num_msi_left = vectors;
2476
2477         hdev->base_msi_vector = pdev->irq;
2478         hdev->roce_base_vector = hdev->base_msi_vector +
2479                                 hdev->num_nic_msi;
2480
2481         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2482                                            sizeof(u16), GFP_KERNEL);
2483         if (!hdev->vector_status) {
2484                 pci_free_irq_vectors(pdev);
2485                 return -ENOMEM;
2486         }
2487
2488         for (i = 0; i < hdev->num_msi; i++)
2489                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2490
2491         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2492                                         sizeof(int), GFP_KERNEL);
2493         if (!hdev->vector_irq) {
2494                 pci_free_irq_vectors(pdev);
2495                 return -ENOMEM;
2496         }
2497
2498         return 0;
2499 }
2500
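/* Half duplex is only meaningful at 10M/100M; force full duplex for all
 * other speeds.
 */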
2501 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2502 {
2503         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2504                 duplex = HCLGE_MAC_FULL;
2505
2506         return duplex;
2507 }
2508
2509 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2510                                       u8 duplex)
2511 {
2512         struct hclge_config_mac_speed_dup_cmd *req;
2513         struct hclge_desc desc;
2514         int ret;
2515
2516         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2517
2518         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2519
2520         if (duplex)
2521                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2522
2523         switch (speed) {
2524         case HCLGE_MAC_SPEED_10M:
2525                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2526                                 HCLGE_CFG_SPEED_S, 6);
2527                 break;
2528         case HCLGE_MAC_SPEED_100M:
2529                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2530                                 HCLGE_CFG_SPEED_S, 7);
2531                 break;
2532         case HCLGE_MAC_SPEED_1G:
2533                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2534                                 HCLGE_CFG_SPEED_S, 0);
2535                 break;
2536         case HCLGE_MAC_SPEED_10G:
2537                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2538                                 HCLGE_CFG_SPEED_S, 1);
2539                 break;
2540         case HCLGE_MAC_SPEED_25G:
2541                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2542                                 HCLGE_CFG_SPEED_S, 2);
2543                 break;
2544         case HCLGE_MAC_SPEED_40G:
2545                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2546                                 HCLGE_CFG_SPEED_S, 3);
2547                 break;
2548         case HCLGE_MAC_SPEED_50G:
2549                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2550                                 HCLGE_CFG_SPEED_S, 4);
2551                 break;
2552         case HCLGE_MAC_SPEED_100G:
2553                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2554                                 HCLGE_CFG_SPEED_S, 5);
2555                 break;
2556         case HCLGE_MAC_SPEED_200G:
2557                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2558                                 HCLGE_CFG_SPEED_S, 8);
2559                 break;
2560         default:
2561                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2562                 return -EINVAL;
2563         }
2564
2565         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2566                       1);
2567
2568         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2569         if (ret) {
2570                 dev_err(&hdev->pdev->dev,
2571                         "mac speed/duplex config cmd failed %d.\n", ret);
2572                 return ret;
2573         }
2574
2575         return 0;
2576 }
2577
2578 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2579 {
2580         struct hclge_mac *mac = &hdev->hw.mac;
2581         int ret;
2582
2583         duplex = hclge_check_speed_dup(duplex, speed);
2584         if (!mac->support_autoneg && mac->speed == speed &&
2585             mac->duplex == duplex)
2586                 return 0;
2587
2588         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2589         if (ret)
2590                 return ret;
2591
2592         hdev->hw.mac.speed = speed;
2593         hdev->hw.mac.duplex = duplex;
2594
2595         return 0;
2596 }
2597
2598 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2599                                      u8 duplex)
2600 {
2601         struct hclge_vport *vport = hclge_get_vport(handle);
2602         struct hclge_dev *hdev = vport->back;
2603
2604         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2605 }
2606
2607 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2608 {
2609         struct hclge_config_auto_neg_cmd *req;
2610         struct hclge_desc desc;
2611         u32 flag = 0;
2612         int ret;
2613
2614         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2615
2616         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2617         if (enable)
2618                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2619         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2620
2621         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2622         if (ret)
2623                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2624                         ret);
2625
2626         return ret;
2627 }
2628
2629 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2630 {
2631         struct hclge_vport *vport = hclge_get_vport(handle);
2632         struct hclge_dev *hdev = vport->back;
2633
2634         if (!hdev->hw.mac.support_autoneg) {
2635                 if (enable) {
2636                         dev_err(&hdev->pdev->dev,
2637                                 "autoneg is not supported by current port\n");
2638                         return -EOPNOTSUPP;
2639                 } else {
2640                         return 0;
2641                 }
2642         }
2643
2644         return hclge_set_autoneg_en(hdev, enable);
2645 }
2646
2647 static int hclge_get_autoneg(struct hnae3_handle *handle)
2648 {
2649         struct hclge_vport *vport = hclge_get_vport(handle);
2650         struct hclge_dev *hdev = vport->back;
2651         struct phy_device *phydev = hdev->hw.mac.phydev;
2652
2653         if (phydev)
2654                 return phydev->autoneg;
2655
2656         return hdev->hw.mac.autoneg;
2657 }
2658
2659 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2660 {
2661         struct hclge_vport *vport = hclge_get_vport(handle);
2662         struct hclge_dev *hdev = vport->back;
2663         int ret;
2664
2665         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2666
2667         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2668         if (ret)
2669                 return ret;
2670         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2671 }
2672
2673 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2674 {
2675         struct hclge_vport *vport = hclge_get_vport(handle);
2676         struct hclge_dev *hdev = vport->back;
2677
2678         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2679                 return hclge_set_autoneg_en(hdev, !halt);
2680
2681         return 0;
2682 }
2683
2684 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2685 {
2686         struct hclge_config_fec_cmd *req;
2687         struct hclge_desc desc;
2688         int ret;
2689
2690         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2691
2692         req = (struct hclge_config_fec_cmd *)desc.data;
2693         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2694                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2695         if (fec_mode & BIT(HNAE3_FEC_RS))
2696                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2697                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2698         if (fec_mode & BIT(HNAE3_FEC_BASER))
2699                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2700                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2701
2702         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2703         if (ret)
2704                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2705
2706         return ret;
2707 }
2708
2709 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2710 {
2711         struct hclge_vport *vport = hclge_get_vport(handle);
2712         struct hclge_dev *hdev = vport->back;
2713         struct hclge_mac *mac = &hdev->hw.mac;
2714         int ret;
2715
2716         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2717                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2718                 return -EINVAL;
2719         }
2720
2721         ret = hclge_set_fec_hw(hdev, fec_mode);
2722         if (ret)
2723                 return ret;
2724
2725         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2726         return 0;
2727 }
2728
2729 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2730                           u8 *fec_mode)
2731 {
2732         struct hclge_vport *vport = hclge_get_vport(handle);
2733         struct hclge_dev *hdev = vport->back;
2734         struct hclge_mac *mac = &hdev->hw.mac;
2735
2736         if (fec_ability)
2737                 *fec_ability = mac->fec_ability;
2738         if (fec_mode)
2739                 *fec_mode = mac->fec_mode;
2740 }
2741
2742 static int hclge_mac_init(struct hclge_dev *hdev)
2743 {
2744         struct hclge_mac *mac = &hdev->hw.mac;
2745         int ret;
2746
2747         hdev->support_sfp_query = true;
2748         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2749         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2750                                          hdev->hw.mac.duplex);
2751         if (ret)
2752                 return ret;
2753
2754         if (hdev->hw.mac.support_autoneg) {
2755                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2756                 if (ret)
2757                         return ret;
2758         }
2759
2760         mac->link = 0;
2761
2762         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2763                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2764                 if (ret)
2765                         return ret;
2766         }
2767
2768         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2769         if (ret) {
2770                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2771                 return ret;
2772         }
2773
2774         ret = hclge_set_default_loopback(hdev);
2775         if (ret)
2776                 return ret;
2777
2778         ret = hclge_buffer_alloc(hdev);
2779         if (ret)
2780                 dev_err(&hdev->pdev->dev,
2781                         "allocate buffer fail, ret=%d\n", ret);
2782
2783         return ret;
2784 }
2785
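/* The mailbox, reset and periodic work all share one delayed work item
 * (hdev->service_task). The helpers below only queue that work on the CPU
 * recorded in hdev->affinity_mask; the HCLGE_STATE_*_SERVICE_SCHED bits
 * decide which sub-tasks hclge_service_task() actually runs.
 */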
2786 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2787 {
2788         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2789             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2790                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2791                                     hclge_wq, &hdev->service_task, 0);
2792 }
2793
2794 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2795 {
2796         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2797             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2798                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2799                                     hclge_wq, &hdev->service_task, 0);
2800 }
2801
2802 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2803 {
2804         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2805             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2806                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2807                                     hclge_wq, &hdev->service_task,
2808                                     delay_time);
2809 }
2810
2811 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2812 {
2813         struct hclge_link_status_cmd *req;
2814         struct hclge_desc desc;
2815         int ret;
2816
2817         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2818         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2819         if (ret) {
2820                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2821                         ret);
2822                 return ret;
2823         }
2824
2825         req = (struct hclge_link_status_cmd *)desc.data;
2826         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2827                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2828
2829         return 0;
2830 }
2831
2832 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2833 {
2834         struct phy_device *phydev = hdev->hw.mac.phydev;
2835
2836         *link_status = HCLGE_LINK_STATUS_DOWN;
2837
2838         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2839                 return 0;
2840
2841         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2842                 return 0;
2843
2844         return hclge_get_mac_link_status(hdev, link_status);
2845 }
2846
2847 static void hclge_update_link_status(struct hclge_dev *hdev)
2848 {
2849         struct hnae3_client *rclient = hdev->roce_client;
2850         struct hnae3_client *client = hdev->nic_client;
2851         struct hnae3_handle *rhandle;
2852         struct hnae3_handle *handle;
2853         int state;
2854         int ret;
2855         int i;
2856
2857         if (!client)
2858                 return;
2859
2860         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2861                 return;
2862
2863         ret = hclge_get_mac_phy_link(hdev, &state);
2864         if (ret) {
2865                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2866                 return;
2867         }
2868
2869         if (state != hdev->hw.mac.link) {
2870                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2871                         handle = &hdev->vport[i].nic;
2872                         client->ops->link_status_change(handle, state);
2873                         hclge_config_mac_tnl_int(hdev, state);
2874                         rhandle = &hdev->vport[i].roce;
2875                         if (rclient && rclient->ops->link_status_change)
2876                                 rclient->ops->link_status_change(rhandle,
2877                                                                  state);
2878                 }
2879                 hdev->hw.mac.link = state;
2880         }
2881
2882         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2883 }
2884
2885 static void hclge_update_port_capability(struct hclge_mac *mac)
2886 {
2887         /* update fec ability by speed */
2888         hclge_convert_setting_fec(mac);
2889
2890         /* firmware cannot identify the backplane type; the media type
2891          * read from the configuration can help to identify it
2892          */
2893         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2894             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2895                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2896         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2897                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2898
2899         if (mac->support_autoneg) {
2900                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2901                 linkmode_copy(mac->advertising, mac->supported);
2902         } else {
2903                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2904                                    mac->supported);
2905                 linkmode_zero(mac->advertising);
2906         }
2907 }
2908
2909 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2910 {
2911         struct hclge_sfp_info_cmd *resp;
2912         struct hclge_desc desc;
2913         int ret;
2914
2915         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2916         resp = (struct hclge_sfp_info_cmd *)desc.data;
2917         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2918         if (ret == -EOPNOTSUPP) {
2919                 dev_warn(&hdev->pdev->dev,
2920                          "IMP does not support getting SFP speed %d\n", ret);
2921                 return ret;
2922         } else if (ret) {
2923                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2924                 return ret;
2925         }
2926
2927         *speed = le32_to_cpu(resp->speed);
2928
2929         return 0;
2930 }
2931
2932 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2933 {
2934         struct hclge_sfp_info_cmd *resp;
2935         struct hclge_desc desc;
2936         int ret;
2937
2938         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2939         resp = (struct hclge_sfp_info_cmd *)desc.data;
2940
2941         resp->query_type = QUERY_ACTIVE_SPEED;
2942
2943         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2944         if (ret == -EOPNOTSUPP) {
2945                 dev_warn(&hdev->pdev->dev,
2946                          "IMP does not support getting SFP info %d\n", ret);
2947                 return ret;
2948         } else if (ret) {
2949                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2950                 return ret;
2951         }
2952
2953         /* In some cases the mac speed obtained from the IMP may be 0; it
2954          * should not be written to mac->speed.
2955          */
2956         if (!le32_to_cpu(resp->speed))
2957                 return 0;
2958
2959         mac->speed = le32_to_cpu(resp->speed);
2960         /* if resp->speed_ability is 0, the firmware is an old version that
2961          * does not report these params, so do not update them
2962          */
2963         if (resp->speed_ability) {
2964                 mac->module_type = le32_to_cpu(resp->module_type);
2965                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2966                 mac->autoneg = resp->autoneg;
2967                 mac->support_autoneg = resp->autoneg_ability;
2968                 mac->speed_type = QUERY_ACTIVE_SPEED;
2969                 if (!resp->active_fec)
2970                         mac->fec_mode = 0;
2971                 else
2972                         mac->fec_mode = BIT(resp->active_fec);
2973         } else {
2974                 mac->speed_type = QUERY_SFP_SPEED;
2975         }
2976
2977         return 0;
2978 }
2979
2980 static int hclge_update_port_info(struct hclge_dev *hdev)
2981 {
2982         struct hclge_mac *mac = &hdev->hw.mac;
2983         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2984         int ret;
2985
2986         /* get the port info from SFP cmd if not copper port */
2987         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2988                 return 0;
2989
2990         /* if IMP does not support getting SFP/qSFP info, return directly */
2991         if (!hdev->support_sfp_query)
2992                 return 0;
2993
2994         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
2995                 ret = hclge_get_sfp_info(hdev, mac);
2996         else
2997                 ret = hclge_get_sfp_speed(hdev, &speed);
2998
2999         if (ret == -EOPNOTSUPP) {
3000                 hdev->support_sfp_query = false;
3001                 return ret;
3002         } else if (ret) {
3003                 return ret;
3004         }
3005
3006         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3007                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3008                         hclge_update_port_capability(mac);
3009                         return 0;
3010                 }
3011                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3012                                                HCLGE_MAC_FULL);
3013         } else {
3014                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3015                         return 0; /* do nothing if no SFP */
3016
3017                 /* must config full duplex for SFP */
3018                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3019         }
3020 }
3021
3022 static int hclge_get_status(struct hnae3_handle *handle)
3023 {
3024         struct hclge_vport *vport = hclge_get_vport(handle);
3025         struct hclge_dev *hdev = vport->back;
3026
3027         hclge_update_link_status(hdev);
3028
3029         return hdev->hw.mac.link;
3030 }
3031
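/* Map a zero-based VF index from the stack to its vport. vport 0 is the PF
 * itself, so VF n lives at hdev->vport[n + HCLGE_VF_VPORT_START_NUM].
 */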
3032 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3033 {
3034         if (!pci_num_vf(hdev->pdev)) {
3035                 dev_err(&hdev->pdev->dev,
3036                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3037                 return NULL;
3038         }
3039
3040         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3041                 dev_err(&hdev->pdev->dev,
3042                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3043                         vf, pci_num_vf(hdev->pdev));
3044                 return NULL;
3045         }
3046
3047         /* VFs start from 1 in the vport array */
3048         vf += HCLGE_VF_VPORT_START_NUM;
3049         return &hdev->vport[vf];
3050 }
3051
3052 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3053                                struct ifla_vf_info *ivf)
3054 {
3055         struct hclge_vport *vport = hclge_get_vport(handle);
3056         struct hclge_dev *hdev = vport->back;
3057
3058         vport = hclge_get_vf_vport(hdev, vf);
3059         if (!vport)
3060                 return -EINVAL;
3061
3062         ivf->vf = vf;
3063         ivf->linkstate = vport->vf_info.link_state;
3064         ivf->spoofchk = vport->vf_info.spoofchk;
3065         ivf->trusted = vport->vf_info.trusted;
3066         ivf->min_tx_rate = 0;
3067         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3068         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3069         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3070         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3071         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3072
3073         return 0;
3074 }
3075
3076 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3077                                    int link_state)
3078 {
3079         struct hclge_vport *vport = hclge_get_vport(handle);
3080         struct hclge_dev *hdev = vport->back;
3081
3082         vport = hclge_get_vf_vport(hdev, vf);
3083         if (!vport)
3084                 return -EINVAL;
3085
3086         vport->vf_info.link_state = link_state;
3087
3088         return 0;
3089 }
3090
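/* Decode the vector0 interrupt source registers and return the event type
 * to handle, storing the bits to be cleared in *clearval. Reset events take
 * priority over mailbox events, as explained in the comment below.
 */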
3091 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3092 {
3093         u32 cmdq_src_reg, msix_src_reg;
3094
3095         /* fetch the events from their corresponding regs */
3096         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3097         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3098
3099         /* Assumption: if reset and mailbox events are reported together,
3100          * only the reset event is processed in this pass and the mailbox
3101          * events are deferred. Since the RX CMDQ event has not been cleared
3102          * this time, the hardware will raise another interrupt just for the
3103          * mailbox.
3104          *
3105          * check for vector0 reset event sources
3106          */
3107         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3108                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3109                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3110                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3111                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3112                 hdev->rst_stats.imp_rst_cnt++;
3113                 return HCLGE_VECTOR0_EVENT_RST;
3114         }
3115
3116         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3117                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3118                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3119                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3120                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3121                 hdev->rst_stats.global_rst_cnt++;
3122                 return HCLGE_VECTOR0_EVENT_RST;
3123         }
3124
3125         /* check for vector0 msix event source */
3126         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3127                 *clearval = msix_src_reg;
3128                 return HCLGE_VECTOR0_EVENT_ERR;
3129         }
3130
3131         /* check for vector0 mailbox(=CMDQ RX) event source */
3132         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3133                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3134                 *clearval = cmdq_src_reg;
3135                 return HCLGE_VECTOR0_EVENT_MBX;
3136         }
3137
3138         /* print other vector0 event source */
3139         dev_info(&hdev->pdev->dev,
3140                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3141                  cmdq_src_reg, msix_src_reg);
3142         *clearval = msix_src_reg;
3143
3144         return HCLGE_VECTOR0_EVENT_OTHER;
3145 }
3146
3147 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3148                                     u32 regclr)
3149 {
3150         switch (event_type) {
3151         case HCLGE_VECTOR0_EVENT_RST:
3152                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3153                 break;
3154         case HCLGE_VECTOR0_EVENT_MBX:
3155                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3156                 break;
3157         default:
3158                 break;
3159         }
3160 }
3161
3162 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3163 {
3164         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3165                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3166                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3167                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3168         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3169 }
3170
3171 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3172 {
3173         writel(enable ? 1 : 0, vector->addr);
3174 }
3175
3176 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3177 {
3178         struct hclge_dev *hdev = data;
3179         u32 clearval = 0;
3180         u32 event_cause;
3181
3182         hclge_enable_vector(&hdev->misc_vector, false);
3183         event_cause = hclge_check_event_cause(hdev, &clearval);
3184
3185         /* vector 0 interrupt is shared by reset and mailbox source events. */
3186         switch (event_cause) {
3187         case HCLGE_VECTOR0_EVENT_ERR:
3188                 /* we do not know what type of reset is required now. This can
3189                  * only be decided after we fetch the type of errors which
3190                  * caused this event. Therefore, for now we:
3191                  * 1. Assert an HNAE3_UNKNOWN_RESET type of reset. This means
3192                  *    the actual reset type to be used is deferred.
3193                  * 2. Schedule the reset service task.
3194                  * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3195                  *    will fetch the correct type of reset by first decoding
3196                  *    the types of errors.
3197                  */
3198                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3199                 fallthrough;
3200         case HCLGE_VECTOR0_EVENT_RST:
3201                 hclge_reset_task_schedule(hdev);
3202                 break;
3203         case HCLGE_VECTOR0_EVENT_MBX:
3204                 /* If we are here, then either
3205                  * 1. we are not handling any mbx task and are not scheduled
3206                  *    either,
3207                  *                        OR
3208                  * 2. we are handling an mbx task but nothing more is
3209                  *    scheduled.
3210                  * In both cases we should schedule the mbx task, as there are
3211                  * more mbx messages reported by this interrupt.
3212                  */
3213                 hclge_mbx_task_schedule(hdev);
3214                 break;
3215         default:
3216                 dev_warn(&hdev->pdev->dev,
3217                          "received unknown or unhandled event of vector0\n");
3218                 break;
3219         }
3220
3221         hclge_clear_event_cause(hdev, event_cause, clearval);
3222
3223         /* Enable the interrupt if it was not caused by a reset. When
3224          * clearval equals 0, the interrupt status may have been cleared
3225          * by hardware before the driver read the status register; in
3226          * that case the vector0 interrupt should also be re-enabled.
3227          */
3228         if (!clearval ||
3229             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3230                 hclge_enable_vector(&hdev->misc_vector, true);
3231         }
3232
3233         return IRQ_HANDLED;
3234 }
3235
3236 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3237 {
3238         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3239                 dev_warn(&hdev->pdev->dev,
3240                          "vector(vector_id %d) has been freed.\n", vector_id);
3241                 return;
3242         }
3243
3244         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3245         hdev->num_msi_left += 1;
3246         hdev->num_msi_used -= 1;
3247 }
3248
3249 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3250 {
3251         struct hclge_misc_vector *vector = &hdev->misc_vector;
3252
3253         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3254
3255         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3256         hdev->vector_status[0] = 0;
3257
3258         hdev->num_msi_left -= 1;
3259         hdev->num_msi_used += 1;
3260 }
3261
3262 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3263                                       const cpumask_t *mask)
3264 {
3265         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3266                                               affinity_notify);
3267
3268         cpumask_copy(&hdev->affinity_mask, mask);
3269 }
3270
3271 static void hclge_irq_affinity_release(struct kref *ref)
3272 {
3273 }
3274
3275 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3276 {
3277         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3278                               &hdev->affinity_mask);
3279
3280         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3281         hdev->affinity_notify.release = hclge_irq_affinity_release;
3282         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3283                                   &hdev->affinity_notify);
3284 }
3285
3286 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3287 {
3288         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3289         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3290 }
3291
3292 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3293 {
3294         int ret;
3295
3296         hclge_get_misc_vector(hdev);
3297
3298         /* this irq is explicitly freed in hclge_misc_irq_uninit() */
3299         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3300                  HCLGE_NAME, pci_name(hdev->pdev));
3301         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3302                           0, hdev->misc_vector.name, hdev);
3303         if (ret) {
3304                 hclge_free_vector(hdev, 0);
3305                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3306                         hdev->misc_vector.vector_irq);
3307         }
3308
3309         return ret;
3310 }
3311
3312 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3313 {
3314         free_irq(hdev->misc_vector.vector_irq, hdev);
3315         hclge_free_vector(hdev, 0);
3316 }
3317
3318 int hclge_notify_client(struct hclge_dev *hdev,
3319                         enum hnae3_reset_notify_type type)
3320 {
3321         struct hnae3_client *client = hdev->nic_client;
3322         u16 i;
3323
3324         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3325                 return 0;
3326
3327         if (!client->ops->reset_notify)
3328                 return -EOPNOTSUPP;
3329
3330         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3331                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3332                 int ret;
3333
3334                 ret = client->ops->reset_notify(handle, type);
3335                 if (ret) {
3336                         dev_err(&hdev->pdev->dev,
3337                                 "notify nic client failed %d(%d)\n", type, ret);
3338                         return ret;
3339                 }
3340         }
3341
3342         return 0;
3343 }
3344
3345 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3346                                     enum hnae3_reset_notify_type type)
3347 {
3348         struct hnae3_client *client = hdev->roce_client;
3349         int ret;
3350         u16 i;
3351
3352         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3353                 return 0;
3354
3355         if (!client->ops->reset_notify)
3356                 return -EOPNOTSUPP;
3357
3358         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3359                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3360
3361                 ret = client->ops->reset_notify(handle, type);
3362                 if (ret) {
3363                         dev_err(&hdev->pdev->dev,
3364                                 "notify roce client failed %d(%d)",
3365                                 type, ret);
3366                         return ret;
3367                 }
3368         }
3369
3370         return ret;
3371 }
3372
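/* Poll the reset status register matching the pending reset type until the
 * hardware reports the reset as complete, sleeping 100 ms between reads and
 * giving up after 350 polls (roughly 35 seconds in total).
 */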
3373 static int hclge_reset_wait(struct hclge_dev *hdev)
3374 {
3375 #define HCLGE_RESET_WAIT_MS     100
3376 #define HCLGE_RESET_WAIT_CNT    350
3377
3378         u32 val, reg, reg_bit;
3379         u32 cnt = 0;
3380
3381         switch (hdev->reset_type) {
3382         case HNAE3_IMP_RESET:
3383                 reg = HCLGE_GLOBAL_RESET_REG;
3384                 reg_bit = HCLGE_IMP_RESET_BIT;
3385                 break;
3386         case HNAE3_GLOBAL_RESET:
3387                 reg = HCLGE_GLOBAL_RESET_REG;
3388                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3389                 break;
3390         case HNAE3_FUNC_RESET:
3391                 reg = HCLGE_FUN_RST_ING;
3392                 reg_bit = HCLGE_FUN_RST_ING_B;
3393                 break;
3394         default:
3395                 dev_err(&hdev->pdev->dev,
3396                         "Wait for unsupported reset type: %d\n",
3397                         hdev->reset_type);
3398                 return -EINVAL;
3399         }
3400
3401         val = hclge_read_dev(&hdev->hw, reg);
3402         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3403                 msleep(HCLGE_RESET_WAIT_MS);
3404                 val = hclge_read_dev(&hdev->hw, reg);
3405                 cnt++;
3406         }
3407
3408         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3409                 dev_warn(&hdev->pdev->dev,
3410                          "Wait for reset timeout: %d\n", hdev->reset_type);
3411                 return -EBUSY;
3412         }
3413
3414         return 0;
3415 }
3416
3417 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3418 {
3419         struct hclge_vf_rst_cmd *req;
3420         struct hclge_desc desc;
3421
3422         req = (struct hclge_vf_rst_cmd *)desc.data;
3423         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3424         req->dest_vfid = func_id;
3425
3426         if (reset)
3427                 req->vf_rst = 0x1;
3428
3429         return hclge_cmd_send(&hdev->hw, &desc, 1);
3430 }
3431
3432 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3433 {
3434         int i;
3435
3436         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3437                 struct hclge_vport *vport = &hdev->vport[i];
3438                 int ret;
3439
3440                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3441                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3442                 if (ret) {
3443                         dev_err(&hdev->pdev->dev,
3444                                 "set vf(%u) rst failed %d!\n",
3445                                 vport->vport_id, ret);
3446                         return ret;
3447                 }
3448
3449                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3450                         continue;
3451
3452                 /* Inform VF to process the reset.
3453                  * hclge_inform_reset_assert_to_vf may fail if VF
3454                  * driver is not loaded.
3455                  */
3456                 ret = hclge_inform_reset_assert_to_vf(vport);
3457                 if (ret)
3458                         dev_warn(&hdev->pdev->dev,
3459                                  "inform reset to vf(%u) failed %d!\n",
3460                                  vport->vport_id, ret);
3461         }
3462
3463         return 0;
3464 }
3465
3466 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3467 {
3468         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3469             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3470             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3471                 return;
3472
3473         hclge_mbx_handler(hdev);
3474
3475         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3476 }
3477
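/* Before asserting a PF or FLR reset, wait until all VFs report that they
 * have stopped IO. Mailbox messages keep being serviced while polling, since
 * the VFs bring their netdevs down via mailbox. Old firmware that does not
 * support the query gets a fixed wait instead.
 */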
3478 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3479 {
3480         struct hclge_pf_rst_sync_cmd *req;
3481         struct hclge_desc desc;
3482         int cnt = 0;
3483         int ret;
3484
3485         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3486         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3487
3488         do {
3489                 /* VFs need to down netdev by mbx during PF or FLR reset */
3490                 hclge_mailbox_service_task(hdev);
3491
3492                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3493                 /* for compatibility with old firmware, wait
3494                  * 100 ms for the VFs to stop IO
3495                  */
3496                 if (ret == -EOPNOTSUPP) {
3497                         msleep(HCLGE_RESET_SYNC_TIME);
3498                         return;
3499                 } else if (ret) {
3500                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3501                                  ret);
3502                         return;
3503                 } else if (req->all_vf_ready) {
3504                         return;
3505                 }
3506                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3507                 hclge_cmd_reuse_desc(&desc, true);
3508         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3509
3510         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3511 }
3512
3513 void hclge_report_hw_error(struct hclge_dev *hdev,
3514                            enum hnae3_hw_error_type type)
3515 {
3516         struct hnae3_client *client = hdev->nic_client;
3517         u16 i;
3518
3519         if (!client || !client->ops->process_hw_error ||
3520             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3521                 return;
3522
3523         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3524                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3525 }
3526
3527 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3528 {
3529         u32 reg_val;
3530
3531         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3532         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3533                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3534                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3535                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3536         }
3537
3538         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3539                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3540                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3541                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3542         }
3543 }
3544
3545 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3546 {
3547         struct hclge_desc desc;
3548         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3549         int ret;
3550
3551         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3552         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3553         req->fun_reset_vfid = func_id;
3554
3555         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3556         if (ret)
3557                 dev_err(&hdev->pdev->dev,
3558                         "send function reset cmd fail, status =%d\n", ret);
3559
3560         return ret;
3561 }
3562
3563 static void hclge_do_reset(struct hclge_dev *hdev)
3564 {
3565         struct hnae3_handle *handle = &hdev->vport[0].nic;
3566         struct pci_dev *pdev = hdev->pdev;
3567         u32 val;
3568
3569         if (hclge_get_hw_reset_stat(handle)) {
3570                 dev_info(&pdev->dev, "hardware reset not finished\n");
3571                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3572                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3573                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3574                 return;
3575         }
3576
3577         switch (hdev->reset_type) {
3578         case HNAE3_GLOBAL_RESET:
3579                 dev_info(&pdev->dev, "global reset requested\n");
3580                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3581                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3582                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3583                 break;
3584         case HNAE3_FUNC_RESET:
3585                 dev_info(&pdev->dev, "PF reset requested\n");
3586                 /* schedule again to check later */
3587                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3588                 hclge_reset_task_schedule(hdev);
3589                 break;
3590         default:
3591                 dev_warn(&pdev->dev,
3592                          "unsupported reset type: %d\n", hdev->reset_type);
3593                 break;
3594         }
3595 }
3596
3597 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3598                                                    unsigned long *addr)
3599 {
3600         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3601         struct hclge_dev *hdev = ae_dev->priv;
3602
3603         /* first, resolve any unknown reset type to the known type(s) */
3604         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3605                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3606                                         HCLGE_MISC_VECTOR_INT_STS);
3607                 /* we will intentionally ignore any errors from this function
3608                  * as we will end up in *some* reset request in any case
3609                  */
3610                 if (hclge_handle_hw_msix_error(hdev, addr))
3611                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3612                                  msix_sts_reg);
3613
3614                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3615                 /* We deferred the clearing of the error event which caused the
3616                  * interrupt, since it was not possible to do that in interrupt
3617                  * context (and this is the reason we introduced the new UNKNOWN
3618                  * reset type). Now that the errors have been handled and cleared
3619                  * in hardware, we can safely re-enable interrupts. This is an
3620                  * exception to the norm.
3621                  */
3622                 hclge_enable_vector(&hdev->misc_vector, true);
3623         }
3624
3625         /* return the highest priority reset level amongst all */
3626         if (test_bit(HNAE3_IMP_RESET, addr)) {
3627                 rst_level = HNAE3_IMP_RESET;
3628                 clear_bit(HNAE3_IMP_RESET, addr);
3629                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3630                 clear_bit(HNAE3_FUNC_RESET, addr);
3631         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3632                 rst_level = HNAE3_GLOBAL_RESET;
3633                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3634                 clear_bit(HNAE3_FUNC_RESET, addr);
3635         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3636                 rst_level = HNAE3_FUNC_RESET;
3637                 clear_bit(HNAE3_FUNC_RESET, addr);
3638         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3639                 rst_level = HNAE3_FLR_RESET;
3640                 clear_bit(HNAE3_FLR_RESET, addr);
3641         }
3642
3643         if (hdev->reset_type != HNAE3_NONE_RESET &&
3644             rst_level < hdev->reset_type)
3645                 return HNAE3_NONE_RESET;
3646
3647         return rst_level;
3648 }
3649
3650 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3651 {
3652         u32 clearval = 0;
3653
3654         switch (hdev->reset_type) {
3655         case HNAE3_IMP_RESET:
3656                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3657                 break;
3658         case HNAE3_GLOBAL_RESET:
3659                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3660                 break;
3661         default:
3662                 break;
3663         }
3664
3665         if (!clearval)
3666                 return;
3667
3668         /* For revision 0x20, the reset interrupt source
3669          * can only be cleared after the hardware reset is done
3670          */
3671         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3672                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3673                                 clearval);
3674
3675         hclge_enable_vector(&hdev->misc_vector, true);
3676 }
3677
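/* Set or clear the software reset ready bit in the NIC CSQ depth register.
 * This is the handshake that tells the hardware whether the driver has
 * finished its reset preparation; see hclge_reset_prepare_wait() and
 * hclge_reset_prepare_up().
 */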
3678 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3679 {
3680         u32 reg_val;
3681
3682         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3683         if (enable)
3684                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3685         else
3686                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3687
3688         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3689 }
3690
3691 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3692 {
3693         int ret;
3694
3695         ret = hclge_set_all_vf_rst(hdev, true);
3696         if (ret)
3697                 return ret;
3698
3699         hclge_func_reset_sync_vf(hdev);
3700
3701         return 0;
3702 }
3703
3704 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3705 {
3706         u32 reg_val;
3707         int ret = 0;
3708
3709         switch (hdev->reset_type) {
3710         case HNAE3_FUNC_RESET:
3711                 ret = hclge_func_reset_notify_vf(hdev);
3712                 if (ret)
3713                         return ret;
3714
3715                 ret = hclge_func_reset_cmd(hdev, 0);
3716                 if (ret) {
3717                         dev_err(&hdev->pdev->dev,
3718                                 "asserting function reset fail %d!\n", ret);
3719                         return ret;
3720                 }
3721
3722                 /* After performing a PF reset, it is not necessary to do any
3723                  * mailbox handling or send any command to the firmware, because
3724                  * any mailbox handling or command to the firmware is only valid
3725                  * after hclge_cmd_init is called.
3726                  */
3727                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3728                 hdev->rst_stats.pf_rst_cnt++;
3729                 break;
3730         case HNAE3_FLR_RESET:
3731                 ret = hclge_func_reset_notify_vf(hdev);
3732                 if (ret)
3733                         return ret;
3734                 break;
3735         case HNAE3_IMP_RESET:
3736                 hclge_handle_imp_error(hdev);
3737                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3738                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3739                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3740                 break;
3741         default:
3742                 break;
3743         }
3744
3745         /* inform hardware that preparatory work is done */
3746         msleep(HCLGE_RESET_SYNC_TIME);
3747         hclge_reset_handshake(hdev, true);
3748         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3749
3750         return ret;
3751 }
3752
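/* Called when a reset attempt fails. Returns true if the reset task should
 * be rescheduled (another reset is already pending, or the failure count is
 * still below the limit); returns false if no reschedule is needed, either
 * because a new reset interrupt will drive the retry or because the failure
 * is treated as final.
 */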
3753 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3754 {
3755 #define MAX_RESET_FAIL_CNT 5
3756
3757         if (hdev->reset_pending) {
3758                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3759                          hdev->reset_pending);
3760                 return true;
3761         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3762                    HCLGE_RESET_INT_M) {
3763                 dev_info(&hdev->pdev->dev,
3764                          "reset failed because of a new reset interrupt\n");
3765                 hclge_clear_reset_cause(hdev);
3766                 return false;
3767         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3768                 hdev->rst_stats.reset_fail_cnt++;
3769                 set_bit(hdev->reset_type, &hdev->reset_pending);
3770                 dev_info(&hdev->pdev->dev,
3771                          "re-schedule reset task(%u)\n",
3772                          hdev->rst_stats.reset_fail_cnt);
3773                 return true;
3774         }
3775
3776         hclge_clear_reset_cause(hdev);
3777
3778         /* recover the handshake status when the reset fails */
3779         hclge_reset_handshake(hdev, true);
3780
3781         dev_err(&hdev->pdev->dev, "Reset failed!\n");
3782
3783         hclge_dbg_dump_rst_info(hdev);
3784
3785         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3786
3787         return false;
3788 }
3789
3790 static int hclge_set_rst_done(struct hclge_dev *hdev)
3791 {
3792         struct hclge_pf_rst_done_cmd *req;
3793         struct hclge_desc desc;
3794         int ret;
3795
3796         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3797         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3798         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3799
3800         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3801         /* To be compatible with the old firmware, which does not support
3802          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3803          * return success
3804          */
3805         if (ret == -EOPNOTSUPP) {
3806                 dev_warn(&hdev->pdev->dev,
3807                          "current firmware does not support command(0x%x)!\n",
3808                          HCLGE_OPC_PF_RST_DONE);
3809                 return 0;
3810         } else if (ret) {
3811                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3812                         ret);
3813         }
3814
3815         return ret;
3816 }
3817
3818 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3819 {
3820         int ret = 0;
3821
3822         switch (hdev->reset_type) {
3823         case HNAE3_FUNC_RESET:
3824         case HNAE3_FLR_RESET:
3825                 ret = hclge_set_all_vf_rst(hdev, false);
3826                 break;
3827         case HNAE3_GLOBAL_RESET:
3828         case HNAE3_IMP_RESET:
3829                 ret = hclge_set_rst_done(hdev);
3830                 break;
3831         default:
3832                 break;
3833         }
3834
3835         /* clear the handshake status after re-initialization is done */
3836         hclge_reset_handshake(hdev, false);
3837
3838         return ret;
3839 }
3840
3841 static int hclge_reset_stack(struct hclge_dev *hdev)
3842 {
3843         int ret;
3844
3845         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3846         if (ret)
3847                 return ret;
3848
3849         ret = hclge_reset_ae_dev(hdev->ae_dev);
3850         if (ret)
3851                 return ret;
3852
3853         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3854 }
3855
3856 static int hclge_reset_prepare(struct hclge_dev *hdev)
3857 {
3858         int ret;
3859
3860         hdev->rst_stats.reset_cnt++;
3861         /* perform reset of the stack & ae device for a client */
3862         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3863         if (ret)
3864                 return ret;
3865
3866         rtnl_lock();
3867         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3868         rtnl_unlock();
3869         if (ret)
3870                 return ret;
3871
3872         return hclge_reset_prepare_wait(hdev);
3873 }
3874
3875 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3876 {
3877         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3878         enum hnae3_reset_type reset_level;
3879         int ret;
3880
3881         hdev->rst_stats.hw_reset_done_cnt++;
3882
3883         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3884         if (ret)
3885                 return ret;
3886
3887         rtnl_lock();
3888         ret = hclge_reset_stack(hdev);
3889         rtnl_unlock();
3890         if (ret)
3891                 return ret;
3892
3893         hclge_clear_reset_cause(hdev);
3894
3895         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3896         /* ignore the RoCE notify error if the reset has already failed
3897          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3898          */
3899         if (ret &&
3900             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3901                 return ret;
3902
3903         ret = hclge_reset_prepare_up(hdev);
3904         if (ret)
3905                 return ret;
3906
3907         rtnl_lock();
3908         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3909         rtnl_unlock();
3910         if (ret)
3911                 return ret;
3912
3913         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3914         if (ret)
3915                 return ret;
3916
3917         hdev->last_reset_time = jiffies;
3918         hdev->rst_stats.reset_fail_cnt = 0;
3919         hdev->rst_stats.reset_done_cnt++;
3920         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3921
3922         /* if default_reset_request has a higher level reset request,
3923          * it should be handled as soon as possible, since some errors
3924          * need this kind of reset to be fixed.
3925          */
3926         reset_level = hclge_get_reset_level(ae_dev,
3927                                             &hdev->default_reset_request);
3928         if (reset_level != HNAE3_NONE_RESET)
3929                 set_bit(reset_level, &hdev->reset_request);
3930
3931         return 0;
3932 }
3933
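/* Top-level reset flow: prepare (notify clients down, assert the reset and
 * handshake with the hardware), wait for the hardware to finish, then
 * rebuild the stack and notify clients back up. Any failure is handed to
 * hclge_reset_err_handle(), which decides whether to retry.
 */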
3934 static void hclge_reset(struct hclge_dev *hdev)
3935 {
3936         if (hclge_reset_prepare(hdev))
3937                 goto err_reset;
3938
3939         if (hclge_reset_wait(hdev))
3940                 goto err_reset;
3941
3942         if (hclge_reset_rebuild(hdev))
3943                 goto err_reset;
3944
3945         return;
3946
3947 err_reset:
3948         if (hclge_reset_err_handle(hdev))
3949                 hclge_reset_task_schedule(hdev);
3950 }
3951
3952 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3953 {
3954         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3955         struct hclge_dev *hdev = ae_dev->priv;
3956
3957         /* We might end up getting called broadly because of the two cases below:
3958          * 1. A recoverable error was conveyed through APEI and the only way to
3959          *    bring back normalcy is to reset.
3960          * 2. A new reset request from the stack due to a timeout.
3961          *
3962          * For the first case, the error event might not have an ae handle
3963          * available. Check whether this is a new reset request and we are not
3964          * here just because the last reset attempt did not succeed and the
3965          * watchdog hit us again. We know this if the last reset request did not
3966          * occur very recently (watchdog timer = 5*HZ; check after a sufficiently
3967          * large time, say 4*5*HZ). For a new request we reset the "reset level"
3968          * to PF reset. If it is a repeat of the most recent reset request, we
3969          * want to throttle it, so we do not allow it again before
3970          * HCLGE_RESET_INTERVAL has elapsed.
3971          */
3972         if (!handle)
3973                 handle = &hdev->vport[0].nic;
3974
3975         if (time_before(jiffies, (hdev->last_reset_time +
3976                                   HCLGE_RESET_INTERVAL))) {
3977                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3978                 return;
3979         } else if (hdev->default_reset_request) {
3980                 hdev->reset_level =
3981                         hclge_get_reset_level(ae_dev,
3982                                               &hdev->default_reset_request);
3983         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3984                 hdev->reset_level = HNAE3_FUNC_RESET;
3985         }
3986
3987         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3988                  hdev->reset_level);
3989
3990         /* request reset & schedule reset task */
3991         set_bit(hdev->reset_level, &hdev->reset_request);
3992         hclge_reset_task_schedule(hdev);
3993
3994         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3995                 hdev->reset_level++;
3996 }
3997
3998 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3999                                         enum hnae3_reset_type rst_type)
4000 {
4001         struct hclge_dev *hdev = ae_dev->priv;
4002
4003         set_bit(rst_type, &hdev->default_reset_request);
4004 }
4005
4006 static void hclge_reset_timer(struct timer_list *t)
4007 {
4008         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4009
4010         /* if default_reset_request has no value, it means that this reset
4011          * request has already been handled, so just return here
4012          */
4013         if (!hdev->default_reset_request)
4014                 return;
4015
4016         dev_info(&hdev->pdev->dev,
4017                  "triggering reset in reset timer\n");
4018         hclge_reset_event(hdev->pdev, NULL);
4019 }
4020
4021 static void hclge_reset_subtask(struct hclge_dev *hdev)
4022 {
4023         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4024
4025         /* check if there is any ongoing reset in the hardware. This status can
4026          * be checked from reset_pending. If there is, we need to wait for the
4027          * hardware to complete the reset.
4028          *    a. If we are able to figure out in reasonable time that the hardware
4029          *       has fully reset, then we can proceed with the driver and client
4030          *       reset.
4031          *    b. else, we can come back later to check this status, so re-schedule
4032          *       now.
4033          */
4034         hdev->last_reset_time = jiffies;
4035         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4036         if (hdev->reset_type != HNAE3_NONE_RESET)
4037                 hclge_reset(hdev);
4038
4039         /* check if we got any *new* reset requests to be honored */
4040         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4041         if (hdev->reset_type != HNAE3_NONE_RESET)
4042                 hclge_do_reset(hdev);
4043
4044         hdev->reset_type = HNAE3_NONE_RESET;
4045 }
4046
4047 static void hclge_reset_service_task(struct hclge_dev *hdev)
4048 {
4049         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4050                 return;
4051
4052         down(&hdev->reset_sem);
4053         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4054
4055         hclge_reset_subtask(hdev);
4056
4057         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4058         up(&hdev->reset_sem);
4059 }
4060
4061 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4062 {
4063         int i;
4064
4065         /* start from vport 1 since the PF (vport 0) is always alive */
4066         for (i = 1; i < hdev->num_alloc_vport; i++) {
4067                 struct hclge_vport *vport = &hdev->vport[i];
4068
4069                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4070                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4071
4072                 /* If vf is not alive, set to default value */
4073                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4074                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4075         }
4076 }
4077
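/* Periodic part of the service task. Unless a reset has failed, the link
 * state, MAC table and promisc mode are synced on every run; the remaining
 * work (vport alive state, stats, port info, VLAN filters, aRFS expiry) is
 * rate-limited to roughly once per second. Reschedules itself via
 * hclge_task_schedule().
 */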
4078 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4079 {
4080         unsigned long delta = round_jiffies_relative(HZ);
4081
4082         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4083                 return;
4084
4085         /* Always handle link updating to make sure the link state is
4086          * updated when it is triggered by mbx.
4087          */
4088         hclge_update_link_status(hdev);
4089         hclge_sync_mac_table(hdev);
4090         hclge_sync_promisc_mode(hdev);
4091
4092         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4093                 delta = jiffies - hdev->last_serv_processed;
4094
4095                 if (delta < round_jiffies_relative(HZ)) {
4096                         delta = round_jiffies_relative(HZ) - delta;
4097                         goto out;
4098                 }
4099         }
4100
4101         hdev->serv_processed_cnt++;
4102         hclge_update_vport_alive(hdev);
4103
4104         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4105                 hdev->last_serv_processed = jiffies;
4106                 goto out;
4107         }
4108
4109         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4110                 hclge_update_stats_for_all(hdev);
4111
4112         hclge_update_port_info(hdev);
4113         hclge_sync_vlan_filter(hdev);
4114
4115         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4116                 hclge_rfs_filter_expire(hdev);
4117
4118         hdev->last_serv_processed = jiffies;
4119
4120 out:
4121         hclge_task_schedule(hdev, delta);
4122 }
4123
4124 static void hclge_service_task(struct work_struct *work)
4125 {
4126         struct hclge_dev *hdev =
4127                 container_of(work, struct hclge_dev, service_task.work);
4128
4129         hclge_reset_service_task(hdev);
4130         hclge_mailbox_service_task(hdev);
4131         hclge_periodic_service_task(hdev);
4132
4133         /* Handle reset and mbx again in case the periodic task delays the
4134          * handling by calling hclge_task_schedule() in
4135          * hclge_periodic_service_task().
4136          */
4137         hclge_reset_service_task(hdev);
4138         hclge_mailbox_service_task(hdev);
4139 }
4140
4141 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4142 {
4143         /* VF handle has no client */
4144         if (!handle->client)
4145                 return container_of(handle, struct hclge_vport, nic);
4146         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4147                 return container_of(handle, struct hclge_vport, roce);
4148         else
4149                 return container_of(handle, struct hclge_vport, nic);
4150 }
4151
4152 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4153                                   struct hnae3_vector_info *vector_info)
4154 {
4155 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4156
4157         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4158
4159         /* an extended offset is needed to configure vectors >= 64 */
4160         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4161                 vector_info->io_addr = hdev->hw.io_base +
4162                                 HCLGE_VECTOR_REG_BASE +
4163                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4164         else
4165                 vector_info->io_addr = hdev->hw.io_base +
4166                                 HCLGE_VECTOR_EXT_REG_BASE +
4167                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4168                                 HCLGE_VECTOR_REG_OFFSET_H +
4169                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4170                                 HCLGE_VECTOR_REG_OFFSET;
4171
4172         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4173         hdev->vector_irq[idx] = vector_info->vector;
4174 }
4175
4176 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4177                             struct hnae3_vector_info *vector_info)
4178 {
4179         struct hclge_vport *vport = hclge_get_vport(handle);
4180         struct hnae3_vector_info *vector = vector_info;
4181         struct hclge_dev *hdev = vport->back;
4182         int alloc = 0;
4183         u16 i = 0;
4184         u16 j;
4185
4186         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4187         vector_num = min(hdev->num_msi_left, vector_num);
4188
4189         for (j = 0; j < vector_num; j++) {
4190                 while (++i < hdev->num_nic_msi) {
4191                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4192                                 hclge_get_vector_info(hdev, i, vector);
4193                                 vector++;
4194                                 alloc++;
4195
4196                                 break;
4197                         }
4198                 }
4199         }
4200         hdev->num_msi_left -= alloc;
4201         hdev->num_msi_used += alloc;
4202
4203         return alloc;
4204 }
4205
4206 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4207 {
4208         int i;
4209
4210         for (i = 0; i < hdev->num_msi; i++)
4211                 if (vector == hdev->vector_irq[i])
4212                         return i;
4213
4214         return -EINVAL;
4215 }
4216
4217 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4218 {
4219         struct hclge_vport *vport = hclge_get_vport(handle);
4220         struct hclge_dev *hdev = vport->back;
4221         int vector_id;
4222
4223         vector_id = hclge_get_vector_index(hdev, vector);
4224         if (vector_id < 0) {
4225                 dev_err(&hdev->pdev->dev,
4226                         "Get vector index fail. vector = %d\n", vector);
4227                 return vector_id;
4228         }
4229
4230         hclge_free_vector(hdev, vector_id);
4231
4232         return 0;
4233 }
4234
4235 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4236 {
4237         return HCLGE_RSS_KEY_SIZE;
4238 }
4239
4240 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4241 {
4242         return HCLGE_RSS_IND_TBL_SIZE;
4243 }
4244
4245 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4246                                   const u8 hfunc, const u8 *key)
4247 {
4248         struct hclge_rss_config_cmd *req;
4249         unsigned int key_offset = 0;
4250         struct hclge_desc desc;
4251         int key_counts;
4252         int key_size;
4253         int ret;
4254
4255         key_counts = HCLGE_RSS_KEY_SIZE;
4256         req = (struct hclge_rss_config_cmd *)desc.data;
4257
4258         while (key_counts) {
4259                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4260                                            false);
4261
4262                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4263                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4264
4265                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4266                 memcpy(req->hash_key,
4267                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4268
4269                 key_counts -= key_size;
4270                 key_offset++;
4271                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4272                 if (ret) {
4273                         dev_err(&hdev->pdev->dev,
4274                                 "Configure RSS config fail, status = %d\n",
4275                                 ret);
4276                         return ret;
4277                 }
4278         }
4279         return 0;
4280 }
4281
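     /* Write the RSS indirection table to hardware. Each queue id is split
      * into a low byte (rss_qid_l) and a single high bit packed into the
      * rss_qid_h bitmap, HCLGE_RSS_CFG_TBL_SIZE entries per command.
      */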
4282 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4283 {
4284         struct hclge_rss_indirection_table_cmd *req;
4285         struct hclge_desc desc;
4286         u8 rss_msb_oft;
4287         u8 rss_msb_val;
4288         int ret;
4289         u16 qid;
4290         int i;
4291         u32 j;
4292
4293         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4294
4295         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4296                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
4297                                            false);
4298
4299                 req->start_table_index =
4300                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4301                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4302                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4303                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4304                         req->rss_qid_l[j] = qid & 0xff;
4305                         rss_msb_oft =
4306                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4307                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4308                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4309                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4310                 }
4311                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4312                 if (ret) {
4313                         dev_err(&hdev->pdev->dev,
4314                                 "Configure rss indir table fail, status = %d\n",
4315                                 ret);
4316                         return ret;
4317                 }
4318         }
4319         return 0;
4320 }
4321
4322 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4323                                  u16 *tc_size, u16 *tc_offset)
4324 {
4325         struct hclge_rss_tc_mode_cmd *req;
4326         struct hclge_desc desc;
4327         int ret;
4328         int i;
4329
4330         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4331         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4332
4333         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4334                 u16 mode = 0;
4335
4336                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4337                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4338                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4339                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4340                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4341                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4342                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4343
4344                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4345         }
4346
4347         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4348         if (ret)
4349                 dev_err(&hdev->pdev->dev,
4350                         "Configure rss tc mode fail, status = %d\n", ret);
4351
4352         return ret;
4353 }
4354
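     /* Report the RSS hash level (L4, L3 or none) to the stack based on which
      * tuples are currently enabled for this vport.
      */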
4355 static void hclge_get_rss_type(struct hclge_vport *vport)
4356 {
4357         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4358             vport->rss_tuple_sets.ipv4_udp_en ||
4359             vport->rss_tuple_sets.ipv4_sctp_en ||
4360             vport->rss_tuple_sets.ipv6_tcp_en ||
4361             vport->rss_tuple_sets.ipv6_udp_en ||
4362             vport->rss_tuple_sets.ipv6_sctp_en)
4363                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4364         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4365                  vport->rss_tuple_sets.ipv6_fragment_en)
4366                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4367         else
4368                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4369 }
4370
4371 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4372 {
4373         struct hclge_rss_input_tuple_cmd *req;
4374         struct hclge_desc desc;
4375         int ret;
4376
4377         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4378
4379         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4380
4381         /* Get the tuple cfg from pf */
4382         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4383         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4384         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4385         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4386         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4387         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4388         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4389         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4390         hclge_get_rss_type(&hdev->vport[0]);
4391         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4392         if (ret)
4393                 dev_err(&hdev->pdev->dev,
4394                         "Configure rss input fail, status = %d\n", ret);
4395         return ret;
4396 }
4397
4398 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4399                          u8 *key, u8 *hfunc)
4400 {
4401         struct hclge_vport *vport = hclge_get_vport(handle);
4402         int i;
4403
4404         /* Get hash algorithm */
4405         if (hfunc) {
4406                 switch (vport->rss_algo) {
4407                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4408                         *hfunc = ETH_RSS_HASH_TOP;
4409                         break;
4410                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4411                         *hfunc = ETH_RSS_HASH_XOR;
4412                         break;
4413                 default:
4414                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4415                         break;
4416                 }
4417         }
4418
4419         /* Get the RSS Key required by the user */
4420         if (key)
4421                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4422
4423         /* Get indirection table */
4424         if (indir)
4425                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4426                         indir[i] = vport->rss_indirection_tbl[i];
4427
4428         return 0;
4429 }
4430
4431 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4432                          const u8 *key, const u8 hfunc)
4433 {
4434         struct hclge_vport *vport = hclge_get_vport(handle);
4435         struct hclge_dev *hdev = vport->back;
4436         u8 hash_algo;
4437         int ret, i;
4438
4439         /* Set the RSS Hash Key if specified by the user */
4440         if (key) {
4441                 switch (hfunc) {
4442                 case ETH_RSS_HASH_TOP:
4443                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4444                         break;
4445                 case ETH_RSS_HASH_XOR:
4446                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4447                         break;
4448                 case ETH_RSS_HASH_NO_CHANGE:
4449                         hash_algo = vport->rss_algo;
4450                         break;
4451                 default:
4452                         return -EINVAL;
4453                 }
4454
4455                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4456                 if (ret)
4457                         return ret;
4458
4459                 /* Update the shadow RSS key with the key specified by the user */
4460                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4461                 vport->rss_algo = hash_algo;
4462         }
4463
4464         /* Update the shadow RSS table with user specified qids */
4465         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4466                 vport->rss_indirection_tbl[i] = indir[i];
4467
4468         /* Update the hardware */
4469         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4470 }
4471
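     /* Translate the RXH_* hash fields of an ethtool rxnfc request into the
      * driver's tuple bits for one flow type.
      */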
4472 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4473 {
4474         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4475
4476         if (nfc->data & RXH_L4_B_2_3)
4477                 hash_sets |= HCLGE_D_PORT_BIT;
4478         else
4479                 hash_sets &= ~HCLGE_D_PORT_BIT;
4480
4481         if (nfc->data & RXH_IP_SRC)
4482                 hash_sets |= HCLGE_S_IP_BIT;
4483         else
4484                 hash_sets &= ~HCLGE_S_IP_BIT;
4485
4486         if (nfc->data & RXH_IP_DST)
4487                 hash_sets |= HCLGE_D_IP_BIT;
4488         else
4489                 hash_sets &= ~HCLGE_D_IP_BIT;
4490
4491         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4492                 hash_sets |= HCLGE_V_TAG_BIT;
4493
4494         return hash_sets;
4495 }
4496
4497 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4498                                struct ethtool_rxnfc *nfc)
4499 {
4500         struct hclge_vport *vport = hclge_get_vport(handle);
4501         struct hclge_dev *hdev = vport->back;
4502         struct hclge_rss_input_tuple_cmd *req;
4503         struct hclge_desc desc;
4504         u8 tuple_sets;
4505         int ret;
4506
4507         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4508                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4509                 return -EINVAL;
4510
4511         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4512         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4513
4514         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4515         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4516         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4517         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4518         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4519         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4520         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4521         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4522
4523         tuple_sets = hclge_get_rss_hash_bits(nfc);
4524         switch (nfc->flow_type) {
4525         case TCP_V4_FLOW:
4526                 req->ipv4_tcp_en = tuple_sets;
4527                 break;
4528         case TCP_V6_FLOW:
4529                 req->ipv6_tcp_en = tuple_sets;
4530                 break;
4531         case UDP_V4_FLOW:
4532                 req->ipv4_udp_en = tuple_sets;
4533                 break;
4534         case UDP_V6_FLOW:
4535                 req->ipv6_udp_en = tuple_sets;
4536                 break;
4537         case SCTP_V4_FLOW:
4538                 req->ipv4_sctp_en = tuple_sets;
4539                 break;
4540         case SCTP_V6_FLOW:
4541                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4542                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4543                         return -EINVAL;
4544
4545                 req->ipv6_sctp_en = tuple_sets;
4546                 break;
4547         case IPV4_FLOW:
4548                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4549                 break;
4550         case IPV6_FLOW:
4551                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4552                 break;
4553         default:
4554                 return -EINVAL;
4555         }
4556
4557         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4558         if (ret) {
4559                 dev_err(&hdev->pdev->dev,
4560                         "Set rss tuple fail, status = %d\n", ret);
4561                 return ret;
4562         }
4563
4564         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4565         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4566         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4567         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4568         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4569         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4570         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4571         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4572         hclge_get_rss_type(vport);
4573         return 0;
4574 }
4575
4576 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4577                                struct ethtool_rxnfc *nfc)
4578 {
4579         struct hclge_vport *vport = hclge_get_vport(handle);
4580         u8 tuple_sets;
4581
4582         nfc->data = 0;
4583
4584         switch (nfc->flow_type) {
4585         case TCP_V4_FLOW:
4586                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4587                 break;
4588         case UDP_V4_FLOW:
4589                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4590                 break;
4591         case TCP_V6_FLOW:
4592                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4593                 break;
4594         case UDP_V6_FLOW:
4595                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4596                 break;
4597         case SCTP_V4_FLOW:
4598                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4599                 break;
4600         case SCTP_V6_FLOW:
4601                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4602                 break;
4603         case IPV4_FLOW:
4604         case IPV6_FLOW:
4605                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4606                 break;
4607         default:
4608                 return -EINVAL;
4609         }
4610
4611         if (!tuple_sets)
4612                 return 0;
4613
4614         if (tuple_sets & HCLGE_D_PORT_BIT)
4615                 nfc->data |= RXH_L4_B_2_3;
4616         if (tuple_sets & HCLGE_S_PORT_BIT)
4617                 nfc->data |= RXH_L4_B_0_1;
4618         if (tuple_sets & HCLGE_D_IP_BIT)
4619                 nfc->data |= RXH_IP_DST;
4620         if (tuple_sets & HCLGE_S_IP_BIT)
4621                 nfc->data |= RXH_IP_SRC;
4622
4623         return 0;
4624 }
4625
4626 static int hclge_get_tc_size(struct hnae3_handle *handle)
4627 {
4628         struct hclge_vport *vport = hclge_get_vport(handle);
4629         struct hclge_dev *hdev = vport->back;
4630
4631         return hdev->pf_rss_size_max;
4632 }
4633
4634 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4635 {
4636         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4637         struct hclge_vport *vport = hdev->vport;
4638         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4639         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4640         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4641         struct hnae3_tc_info *tc_info;
4642         u16 roundup_size;
4643         u16 rss_size;
4644         int i;
4645
4646         tc_info = &vport->nic.kinfo.tc_info;
4647         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4648                 rss_size = tc_info->tqp_count[i];
4649                 tc_valid[i] = 0;
4650
4651                 if (!(hdev->hw_tc_map & BIT(i)))
4652                         continue;
4653
4654                 /* tc_size set to hardware is the log2 of the roundup power of
4655                  * two of rss_size; the actual queue size is limited by the
4656                  * indirection table.
4657                  */
4658                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4659                     rss_size == 0) {
4660                         dev_err(&hdev->pdev->dev,
4661                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4662                                 rss_size);
4663                         return -EINVAL;
4664                 }
4665
4666                 roundup_size = roundup_pow_of_two(rss_size);
4667                 roundup_size = ilog2(roundup_size);
4668
4669                 tc_valid[i] = 1;
4670                 tc_size[i] = roundup_size;
4671                 tc_offset[i] = tc_info->tqp_offset[i];
4672         }
4673
4674         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4675 }
4676
4677 int hclge_rss_init_hw(struct hclge_dev *hdev)
4678 {
4679         struct hclge_vport *vport = hdev->vport;
4680         u16 *rss_indir = vport[0].rss_indirection_tbl;
4681         u8 *key = vport[0].rss_hash_key;
4682         u8 hfunc = vport[0].rss_algo;
4683         int ret;
4684
4685         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4686         if (ret)
4687                 return ret;
4688
4689         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4690         if (ret)
4691                 return ret;
4692
4693         ret = hclge_set_rss_input_tuple(hdev);
4694         if (ret)
4695                 return ret;
4696
4697         return hclge_init_rss_tc_mode(hdev);
4698 }
4699
4700 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4701 {
4702         struct hclge_vport *vport = hdev->vport;
4703         int i, j;
4704
4705         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4706                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4707                         vport[j].rss_indirection_tbl[i] =
4708                                 i % vport[j].alloc_rss_size;
4709         }
4710 }
4711
4712 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4713 {
4714         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4715         struct hclge_vport *vport = hdev->vport;
4716
4717         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4718                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4719
4720         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4721                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4722                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4723                 vport[i].rss_tuple_sets.ipv4_udp_en =
4724                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4725                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4726                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4727                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4728                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4729                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4730                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4731                 vport[i].rss_tuple_sets.ipv6_udp_en =
4732                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4733                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4734                         hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4735                         HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4736                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4737                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4738                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4739
4740                 vport[i].rss_algo = rss_algo;
4741
4742                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4743                        HCLGE_RSS_KEY_SIZE);
4744         }
4745
4746         hclge_rss_indir_init_cfg(hdev);
4747 }
4748
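 /* Map (en == true) or unmap the TQP rings in @ring_chain to/from interrupt
  * vector @vector_id. At most HCLGE_VECTOR_ELEMENTS_PER_CMD rings are
  * programmed per command, so long chains are split over several commands.
  */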
4749 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4750                                 int vector_id, bool en,
4751                                 struct hnae3_ring_chain_node *ring_chain)
4752 {
4753         struct hclge_dev *hdev = vport->back;
4754         struct hnae3_ring_chain_node *node;
4755         struct hclge_desc desc;
4756         struct hclge_ctrl_vector_chain_cmd *req =
4757                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4758         enum hclge_cmd_status status;
4759         enum hclge_opcode_type op;
4760         u16 tqp_type_and_id;
4761         int i;
4762
4763         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4764         hclge_cmd_setup_basic_desc(&desc, op, false);
4765         req->int_vector_id_l = hnae3_get_field(vector_id,
4766                                                HCLGE_VECTOR_ID_L_M,
4767                                                HCLGE_VECTOR_ID_L_S);
4768         req->int_vector_id_h = hnae3_get_field(vector_id,
4769                                                HCLGE_VECTOR_ID_H_M,
4770                                                HCLGE_VECTOR_ID_H_S);
4771
4772         i = 0;
4773         for (node = ring_chain; node; node = node->next) {
4774                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4775                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4776                                 HCLGE_INT_TYPE_S,
4777                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4778                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4779                                 HCLGE_TQP_ID_S, node->tqp_index);
4780                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4781                                 HCLGE_INT_GL_IDX_S,
4782                                 hnae3_get_field(node->int_gl_idx,
4783                                                 HNAE3_RING_GL_IDX_M,
4784                                                 HNAE3_RING_GL_IDX_S));
4785                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4786                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4787                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4788                         req->vfid = vport->vport_id;
4789
4790                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4791                         if (status) {
4792                                 dev_err(&hdev->pdev->dev,
4793                                         "Map TQP fail, status is %d.\n",
4794                                         status);
4795                                 return -EIO;
4796                         }
4797                         i = 0;
4798
4799                         hclge_cmd_setup_basic_desc(&desc,
4800                                                    op,
4801                                                    false);
4802                         req->int_vector_id_l =
4803                                 hnae3_get_field(vector_id,
4804                                                 HCLGE_VECTOR_ID_L_M,
4805                                                 HCLGE_VECTOR_ID_L_S);
4806                         req->int_vector_id_h =
4807                                 hnae3_get_field(vector_id,
4808                                                 HCLGE_VECTOR_ID_H_M,
4809                                                 HCLGE_VECTOR_ID_H_S);
4810                 }
4811         }
4812
4813         if (i > 0) {
4814                 req->int_cause_num = i;
4815                 req->vfid = vport->vport_id;
4816                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4817                 if (status) {
4818                         dev_err(&hdev->pdev->dev,
4819                                 "Map TQP fail, status is %d.\n", status);
4820                         return -EIO;
4821                 }
4822         }
4823
4824         return 0;
4825 }
4826
4827 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4828                                     struct hnae3_ring_chain_node *ring_chain)
4829 {
4830         struct hclge_vport *vport = hclge_get_vport(handle);
4831         struct hclge_dev *hdev = vport->back;
4832         int vector_id;
4833
4834         vector_id = hclge_get_vector_index(hdev, vector);
4835         if (vector_id < 0) {
4836                 dev_err(&hdev->pdev->dev,
4837                         "failed to get vector index. vector=%d\n", vector);
4838                 return vector_id;
4839         }
4840
4841         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4842 }
4843
4844 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4845                                        struct hnae3_ring_chain_node *ring_chain)
4846 {
4847         struct hclge_vport *vport = hclge_get_vport(handle);
4848         struct hclge_dev *hdev = vport->back;
4849         int vector_id, ret;
4850
4851         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4852                 return 0;
4853
4854         vector_id = hclge_get_vector_index(hdev, vector);
4855         if (vector_id < 0) {
4856                 dev_err(&handle->pdev->dev,
4857                         "Get vector index fail. ret =%d\n", vector_id);
4858                 return vector_id;
4859         }
4860
4861         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4862         if (ret)
4863                 dev_err(&handle->pdev->dev,
4864                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4865                         vector_id, ret);
4866
4867         return ret;
4868 }
4869
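 /* Program unicast/multicast/broadcast promiscuous mode for function @vf_id.
  * Both the extended and the legacy promisc fields are filled so the command
  * works across device versions.
  */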
4870 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4871                                       bool en_uc, bool en_mc, bool en_bc)
4872 {
4873         struct hclge_vport *vport = &hdev->vport[vf_id];
4874         struct hnae3_handle *handle = &vport->nic;
4875         struct hclge_promisc_cfg_cmd *req;
4876         struct hclge_desc desc;
4877         bool uc_tx_en = en_uc;
4878         u8 promisc_cfg = 0;
4879         int ret;
4880
4881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4882
4883         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4884         req->vf_id = vf_id;
4885
4886         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4887                 uc_tx_en = false;
4888
4889         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4890         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4891         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4892         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4893         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4894         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4895         req->extend_promisc = promisc_cfg;
4896
4897         /* to be compatible with DEVICE_VERSION_V1/2 */
4898         promisc_cfg = 0;
4899         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4900         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4901         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4902         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4903         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4904         req->promisc = promisc_cfg;
4905
4906         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4907         if (ret)
4908                 dev_err(&hdev->pdev->dev,
4909                         "failed to set vport %u promisc mode, ret = %d.\n",
4910                         vf_id, ret);
4911
4912         return ret;
4913 }
4914
4915 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4916                                  bool en_mc_pmc, bool en_bc_pmc)
4917 {
4918         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4919                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
4920 }
4921
4922 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4923                                   bool en_mc_pmc)
4924 {
4925         struct hclge_vport *vport = hclge_get_vport(handle);
4926         struct hclge_dev *hdev = vport->back;
4927         bool en_bc_pmc = true;
4928
4929         /* For devices whose version is below V2, the vlan filter is always
4930          * bypassed when broadcast promisc is enabled. So broadcast promisc
4931          * should stay disabled until the user enables promisc mode.
4932          */
4933         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4934                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4935
4936         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4937                                             en_bc_pmc);
4938 }
4939
4940 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4941 {
4942         struct hclge_vport *vport = hclge_get_vport(handle);
4943         struct hclge_dev *hdev = vport->back;
4944
4945         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4946 }
4947
4948 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4949 {
4950         struct hclge_get_fd_mode_cmd *req;
4951         struct hclge_desc desc;
4952         int ret;
4953
4954         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4955
4956         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4957
4958         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4959         if (ret) {
4960                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4961                 return ret;
4962         }
4963
4964         *fd_mode = req->mode;
4965
4966         return ret;
4967 }
4968
4969 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4970                                    u32 *stage1_entry_num,
4971                                    u32 *stage2_entry_num,
4972                                    u16 *stage1_counter_num,
4973                                    u16 *stage2_counter_num)
4974 {
4975         struct hclge_get_fd_allocation_cmd *req;
4976         struct hclge_desc desc;
4977         int ret;
4978
4979         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4980
4981         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4982
4983         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4984         if (ret) {
4985                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4986                         ret);
4987                 return ret;
4988         }
4989
4990         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4991         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4992         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4993         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4994
4995         return ret;
4996 }
4997
4998 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4999                                    enum HCLGE_FD_STAGE stage_num)
5000 {
5001         struct hclge_set_fd_key_config_cmd *req;
5002         struct hclge_fd_key_cfg *stage;
5003         struct hclge_desc desc;
5004         int ret;
5005
5006         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5007
5008         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5009         stage = &hdev->fd_cfg.key_cfg[stage_num];
5010         req->stage = stage_num;
5011         req->key_select = stage->key_sel;
5012         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5013         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5014         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5015         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5016         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5017         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5018
5019         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5020         if (ret)
5021                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5022
5023         return ret;
5024 }
5025
5026 static int hclge_init_fd_config(struct hclge_dev *hdev)
5027 {
5028 #define LOW_2_WORDS             0x03
5029         struct hclge_fd_key_cfg *key_cfg;
5030         int ret;
5031
5032         if (!hnae3_dev_fd_supported(hdev))
5033                 return 0;
5034
5035         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5036         if (ret)
5037                 return ret;
5038
5039         switch (hdev->fd_cfg.fd_mode) {
5040         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5041                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5042                 break;
5043         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5044                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5045                 break;
5046         default:
5047                 dev_err(&hdev->pdev->dev,
5048                         "Unsupported flow director mode %u\n",
5049                         hdev->fd_cfg.fd_mode);
5050                 return -EOPNOTSUPP;
5051         }
5052
5053         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5054         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5055         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5056         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5057         key_cfg->outer_sipv6_word_en = 0;
5058         key_cfg->outer_dipv6_word_en = 0;
5059
5060         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5061                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5062                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5063                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5064
5065         /* With the max 400bit key, src/dst MAC tuples can also be supported */
5066         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5067                 key_cfg->tuple_active |=
5068                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5069
5070         /* roce_type is used to filter roce frames;
5071          * dst_vport is used to specify the rule
5072          */
5073         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5074
5075         ret = hclge_get_fd_allocation(hdev,
5076                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5077                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5078                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5079                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5080         if (ret)
5081                 return ret;
5082
5083         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5084 }
5085
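 /* Write one half (key x or key y, selected by @sel_x) of a flow director
  * TCAM entry at index @loc. The key is spread across three command
  * descriptors chained with the NEXT flag.
  */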
5086 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5087                                 int loc, u8 *key, bool is_add)
5088 {
5089         struct hclge_fd_tcam_config_1_cmd *req1;
5090         struct hclge_fd_tcam_config_2_cmd *req2;
5091         struct hclge_fd_tcam_config_3_cmd *req3;
5092         struct hclge_desc desc[3];
5093         int ret;
5094
5095         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5096         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5097         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5098         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5099         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5100
5101         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5102         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5103         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5104
5105         req1->stage = stage;
5106         req1->xy_sel = sel_x ? 1 : 0;
5107         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5108         req1->index = cpu_to_le32(loc);
5109         req1->entry_vld = sel_x ? is_add : 0;
5110
5111         if (key) {
5112                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5113                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5114                        sizeof(req2->tcam_data));
5115                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5116                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5117         }
5118
5119         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5120         if (ret)
5121                 dev_err(&hdev->pdev->dev,
5122                         "config tcam key fail, ret=%d\n",
5123                         ret);
5124
5125         return ret;
5126 }
5127
5128 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5129                               struct hclge_fd_ad_data *action)
5130 {
5131         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5132         struct hclge_fd_ad_config_cmd *req;
5133         struct hclge_desc desc;
5134         u64 ad_data = 0;
5135         int ret;
5136
5137         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5138
5139         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5140         req->index = cpu_to_le32(loc);
5141         req->stage = stage;
5142
5143         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5144                       action->write_rule_id_to_bd);
5145         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5146                         action->rule_id);
5147         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5148                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5149                               action->override_tc);
5150                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5151                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5152         }
5153         ad_data <<= 32;
5154         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5155         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5156                       action->forward_to_direct_queue);
5157         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5158                         action->queue_id);
5159         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5160         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5161                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5162         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5163         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5164                         action->counter_id);
5165
5166         req->ad_data = cpu_to_le64(ad_data);
5167         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5168         if (ret)
5169                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5170
5171         return ret;
5172 }
5173
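 /* Convert one tuple of the rule into its TCAM key_x/key_y representation.
  * Returns true when the tuple occupies bytes in the key (either converted
  * from the rule or left as zero for an unused tuple), false for tuple bits
  * this function does not handle.
  */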
5174 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5175                                    struct hclge_fd_rule *rule)
5176 {
5177         u16 tmp_x_s, tmp_y_s;
5178         u32 tmp_x_l, tmp_y_l;
5179         int i;
5180
5181         if (rule->unused_tuple & tuple_bit)
5182                 return true;
5183
5184         switch (tuple_bit) {
5185         case BIT(INNER_DST_MAC):
5186                 for (i = 0; i < ETH_ALEN; i++) {
5187                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5188                                rule->tuples_mask.dst_mac[i]);
5189                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5190                                rule->tuples_mask.dst_mac[i]);
5191                 }
5192
5193                 return true;
5194         case BIT(INNER_SRC_MAC):
5195                 for (i = 0; i < ETH_ALEN; i++) {
5196                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5197                                rule->tuples_mask.src_mac[i]);
5198                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5199                                rule->tuples_mask.src_mac[i]);
5200                 }
5201
5202                 return true;
5203         case BIT(INNER_VLAN_TAG_FST):
5204                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5205                        rule->tuples_mask.vlan_tag1);
5206                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5207                        rule->tuples_mask.vlan_tag1);
5208                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5209                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5210
5211                 return true;
5212         case BIT(INNER_ETH_TYPE):
5213                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5214                        rule->tuples_mask.ether_proto);
5215                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5216                        rule->tuples_mask.ether_proto);
5217                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5218                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5219
5220                 return true;
5221         case BIT(INNER_IP_TOS):
5222                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5223                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5224
5225                 return true;
5226         case BIT(INNER_IP_PROTO):
5227                 calc_x(*key_x, rule->tuples.ip_proto,
5228                        rule->tuples_mask.ip_proto);
5229                 calc_y(*key_y, rule->tuples.ip_proto,
5230                        rule->tuples_mask.ip_proto);
5231
5232                 return true;
5233         case BIT(INNER_SRC_IP):
5234                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5235                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5236                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5237                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5238                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5239                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5240
5241                 return true;
5242         case BIT(INNER_DST_IP):
5243                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5244                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5245                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5246                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5247                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5248                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5249
5250                 return true;
5251         case BIT(INNER_SRC_PORT):
5252                 calc_x(tmp_x_s, rule->tuples.src_port,
5253                        rule->tuples_mask.src_port);
5254                 calc_y(tmp_y_s, rule->tuples.src_port,
5255                        rule->tuples_mask.src_port);
5256                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5257                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5258
5259                 return true;
5260         case BIT(INNER_DST_PORT):
5261                 calc_x(tmp_x_s, rule->tuples.dst_port,
5262                        rule->tuples_mask.dst_port);
5263                 calc_y(tmp_y_s, rule->tuples.dst_port,
5264                        rule->tuples_mask.dst_port);
5265                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5266                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5267
5268                 return true;
5269         default:
5270                 return false;
5271         }
5272 }
5273
5274 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5275                                  u8 vf_id, u8 network_port_id)
5276 {
5277         u32 port_number = 0;
5278
5279         if (port_type == HOST_PORT) {
5280                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5281                                 pf_id);
5282                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5283                                 vf_id);
5284                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5285         } else {
5286                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5287                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5288                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5289         }
5290
5291         return port_number;
5292 }
5293
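 /* Build the meta data portion of the TCAM key: the active meta data tuples
  * are packed from bit 0 upwards, then the used bits are shifted to the MSB
  * end of the 32-bit meta data word before being converted to key_x/key_y.
  */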
5294 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5295                                        __le32 *key_x, __le32 *key_y,
5296                                        struct hclge_fd_rule *rule)
5297 {
5298         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5299         u8 cur_pos = 0, tuple_size, shift_bits;
5300         unsigned int i;
5301
5302         for (i = 0; i < MAX_META_DATA; i++) {
5303                 tuple_size = meta_data_key_info[i].key_length;
5304                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5305
5306                 switch (tuple_bit) {
5307                 case BIT(ROCE_TYPE):
5308                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5309                         cur_pos += tuple_size;
5310                         break;
5311                 case BIT(DST_VPORT):
5312                         port_number = hclge_get_port_number(HOST_PORT, 0,
5313                                                             rule->vf_id, 0);
5314                         hnae3_set_field(meta_data,
5315                                         GENMASK(cur_pos + tuple_size, cur_pos),
5316                                         cur_pos, port_number);
5317                         cur_pos += tuple_size;
5318                         break;
5319                 default:
5320                         break;
5321                 }
5322         }
5323
5324         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5325         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5326         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5327
5328         *key_x = cpu_to_le32(tmp_x << shift_bits);
5329         *key_y = cpu_to_le32(tmp_y << shift_bits);
5330 }
5331
5332 /* A complete key is made up of a meta data key and a tuple key.
5333  * The meta data key is stored at the MSB region, the tuple key is stored at
5334  * the LSB region, and any unused bits in between are filled with 0.
5335  */
5336 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5337                             struct hclge_fd_rule *rule)
5338 {
5339         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5340         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5341         u8 *cur_key_x, *cur_key_y;
5342         u8 meta_data_region;
5343         u8 tuple_size;
5344         int ret;
5345         u32 i;
5346
5347         memset(key_x, 0, sizeof(key_x));
5348         memset(key_y, 0, sizeof(key_y));
5349         cur_key_x = key_x;
5350         cur_key_y = key_y;
5351
5352         for (i = 0; i < MAX_TUPLE; i++) {
5353                 bool tuple_valid;
5354                 u32 check_tuple;
5355
5356                 tuple_size = tuple_key_info[i].key_length / 8;
5357                 check_tuple = key_cfg->tuple_active & BIT(i);
5358
5359                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5360                                                      cur_key_y, rule);
5361                 if (tuple_valid) {
5362                         cur_key_x += tuple_size;
5363                         cur_key_y += tuple_size;
5364                 }
5365         }
5366
5367         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5368                         MAX_META_DATA_LENGTH / 8;
5369
5370         hclge_fd_convert_meta_data(key_cfg,
5371                                    (__le32 *)(key_x + meta_data_region),
5372                                    (__le32 *)(key_y + meta_data_region),
5373                                    rule);
5374
5375         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5376                                    true);
5377         if (ret) {
5378                 dev_err(&hdev->pdev->dev,
5379                         "fd key_y config fail, loc=%u, ret=%d\n",
5380                         rule->location, ret);
5381                 return ret;
5382         }
5383
5384         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5385                                    true);
5386         if (ret)
5387                 dev_err(&hdev->pdev->dev,
5388                         "fd key_x config fail, loc=%u, ret=%d\n",
5389                         rule->location, ret);
5390         return ret;
5391 }
5392
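 /* Translate the rule's action (drop, forward to a TC, or forward to a
  * specific queue) into flow director AD data and write it to hardware at
  * the rule's location.
  */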
5393 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5394                                struct hclge_fd_rule *rule)
5395 {
5396         struct hclge_vport *vport = hdev->vport;
5397         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5398         struct hclge_fd_ad_data ad_data;
5399
5400         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5401         ad_data.ad_id = rule->location;
5402
5403         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5404                 ad_data.drop_packet = true;
5405         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5406                 ad_data.override_tc = true;
5407                 ad_data.queue_id =
5408                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5409                 ad_data.tc_size =
5410                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5411         } else {
5412                 ad_data.forward_to_direct_queue = true;
5413                 ad_data.queue_id = rule->queue_id;
5414         }
5415
5416         ad_data.use_counter = false;
5417         ad_data.counter_id = 0;
5418
5419         ad_data.use_next_stage = false;
5420         ad_data.next_input_key = 0;
5421
5422         ad_data.write_rule_id_to_bd = true;
5423         ad_data.rule_id = rule->location;
5424
5425         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5426 }
5427
5428 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5429                                        u32 *unused_tuple)
5430 {
5431         if (!spec || !unused_tuple)
5432                 return -EINVAL;
5433
5434         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5435
5436         if (!spec->ip4src)
5437                 *unused_tuple |= BIT(INNER_SRC_IP);
5438
5439         if (!spec->ip4dst)
5440                 *unused_tuple |= BIT(INNER_DST_IP);
5441
5442         if (!spec->psrc)
5443                 *unused_tuple |= BIT(INNER_SRC_PORT);
5444
5445         if (!spec->pdst)
5446                 *unused_tuple |= BIT(INNER_DST_PORT);
5447
5448         if (!spec->tos)
5449                 *unused_tuple |= BIT(INNER_IP_TOS);
5450
5451         return 0;
5452 }
5453
5454 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5455                                     u32 *unused_tuple)
5456 {
5457         if (!spec || !unused_tuple)
5458                 return -EINVAL;
5459
5460         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5461                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5462
5463         if (!spec->ip4src)
5464                 *unused_tuple |= BIT(INNER_SRC_IP);
5465
5466         if (!spec->ip4dst)
5467                 *unused_tuple |= BIT(INNER_DST_IP);
5468
5469         if (!spec->tos)
5470                 *unused_tuple |= BIT(INNER_IP_TOS);
5471
5472         if (!spec->proto)
5473                 *unused_tuple |= BIT(INNER_IP_PROTO);
5474
5475         if (spec->l4_4_bytes)
5476                 return -EOPNOTSUPP;
5477
5478         if (spec->ip_ver != ETH_RX_NFC_IP4)
5479                 return -EOPNOTSUPP;
5480
5481         return 0;
5482 }
5483
5484 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5485                                        u32 *unused_tuple)
5486 {
5487         if (!spec || !unused_tuple)
5488                 return -EINVAL;
5489
5490         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5491                 BIT(INNER_IP_TOS);
5492
5493         /* check whether src/dst ip address is used */
5494         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5495             !spec->ip6src[2] && !spec->ip6src[3])
5496                 *unused_tuple |= BIT(INNER_SRC_IP);
5497
5498         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5499             !spec->ip6dst[2] && !spec->ip6dst[3])
5500                 *unused_tuple |= BIT(INNER_DST_IP);
5501
5502         if (!spec->psrc)
5503                 *unused_tuple |= BIT(INNER_SRC_PORT);
5504
5505         if (!spec->pdst)
5506                 *unused_tuple |= BIT(INNER_DST_PORT);
5507
5508         if (spec->tclass)
5509                 return -EOPNOTSUPP;
5510
5511         return 0;
5512 }
5513
5514 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5515                                     u32 *unused_tuple)
5516 {
5517         if (!spec || !unused_tuple)
5518                 return -EINVAL;
5519
5520         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5521                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5522
5523         /* check whether src/dst ip address is used */
5524         if (!spec->ip6src[0] && !spec->ip6src[1] &&
5525             !spec->ip6src[2] && !spec->ip6src[3])
5526                 *unused_tuple |= BIT(INNER_SRC_IP);
5527
5528         if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5529             !spec->ip6dst[2] && !spec->ip6dst[3])
5530                 *unused_tuple |= BIT(INNER_DST_IP);
5531
5532         if (!spec->l4_proto)
5533                 *unused_tuple |= BIT(INNER_IP_PROTO);
5534
5535         if (spec->tclass)
5536                 return -EOPNOTSUPP;
5537
5538         if (spec->l4_4_bytes)
5539                 return -EOPNOTSUPP;
5540
5541         return 0;
5542 }
5543
5544 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5545 {
5546         if (!spec || !unused_tuple)
5547                 return -EINVAL;
5548
5549         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5550                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5551                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5552
5553         if (is_zero_ether_addr(spec->h_source))
5554                 *unused_tuple |= BIT(INNER_SRC_MAC);
5555
5556         if (is_zero_ether_addr(spec->h_dest))
5557                 *unused_tuple |= BIT(INNER_DST_MAC);
5558
5559         if (!spec->h_proto)
5560                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5561
5562         return 0;
5563 }
5564
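     /* check the FLOW_EXT and FLOW_MAC_EXT fields (vlan tci and dest mac)
      * of an ethtool flow spec and update @unused_tuple accordingly
      */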
5565 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5566                                     struct ethtool_rx_flow_spec *fs,
5567                                     u32 *unused_tuple)
5568 {
5569         if (fs->flow_type & FLOW_EXT) {
5570                 if (fs->h_ext.vlan_etype) {
5571                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5572                         return -EOPNOTSUPP;
5573                 }
5574
5575                 if (!fs->h_ext.vlan_tci)
5576                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5577
5578                 if (fs->m_ext.vlan_tci &&
5579                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5580                         dev_err(&hdev->pdev->dev,
5581                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5582                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5583                         return -EINVAL;
5584                 }
5585         } else {
5586                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5587         }
5588
5589         if (fs->flow_type & FLOW_MAC_EXT) {
5590                 if (hdev->fd_cfg.fd_mode !=
5591                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5592                         dev_err(&hdev->pdev->dev,
5593                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5594                         return -EOPNOTSUPP;
5595                 }
5596
5597                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5598                         *unused_tuple |= BIT(INNER_DST_MAC);
5599                 else
5600                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5601         }
5602
5603         return 0;
5604 }
5605
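     /* validate the location and flow type of an ethtool flow spec, then
      * dispatch to the per-flow-type tuple checker and the ext tuple checker
      */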
5606 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5607                                struct ethtool_rx_flow_spec *fs,
5608                                u32 *unused_tuple)
5609 {
5610         u32 flow_type;
5611         int ret;
5612
5613         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5614                 dev_err(&hdev->pdev->dev,
5615                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5616                         fs->location,
5617                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5618                 return -EINVAL;
5619         }
5620
5621         if ((fs->flow_type & FLOW_EXT) &&
5622             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5623                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5624                 return -EOPNOTSUPP;
5625         }
5626
5627         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5628         switch (flow_type) {
5629         case SCTP_V4_FLOW:
5630         case TCP_V4_FLOW:
5631         case UDP_V4_FLOW:
5632                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5633                                                   unused_tuple);
5634                 break;
5635         case IP_USER_FLOW:
5636                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5637                                                unused_tuple);
5638                 break;
5639         case SCTP_V6_FLOW:
5640         case TCP_V6_FLOW:
5641         case UDP_V6_FLOW:
5642                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5643                                                   unused_tuple);
5644                 break;
5645         case IPV6_USER_FLOW:
5646                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5647                                                unused_tuple);
5648                 break;
5649         case ETHER_FLOW:
5650                 if (hdev->fd_cfg.fd_mode !=
5651                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5652                         dev_err(&hdev->pdev->dev,
5653                                 "ETHER_FLOW is not supported in current fd mode!\n");
5654                         return -EOPNOTSUPP;
5655                 }
5656
5657                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5658                                                  unused_tuple);
5659                 break;
5660         default:
5661                 dev_err(&hdev->pdev->dev,
5662                         "unsupported protocol type, protocol type = %#x\n",
5663                         flow_type);
5664                 return -EOPNOTSUPP;
5665         }
5666
5667         if (ret) {
5668                 dev_err(&hdev->pdev->dev,
5669                         "failed to check flow union tuple, ret = %d\n",
5670                         ret);
5671                 return ret;
5672         }
5673
5674         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5675 }
5676
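     /* check whether a rule with the given location exists in the sorted
      * fd rule list
      */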
5677 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5678 {
5679         struct hclge_fd_rule *rule = NULL;
5680         struct hlist_node *node2;
5681
5682         spin_lock_bh(&hdev->fd_rule_lock);
5683         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5684                 if (rule->location >= location)
5685                         break;
5686         }
5687
5688         spin_unlock_bh(&hdev->fd_rule_lock);
5689
5690         return rule && rule->location == location;
5691 }
5692
5693 /* the caller must hold fd_rule_lock when calling this function */
5694 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5695                                      struct hclge_fd_rule *new_rule,
5696                                      u16 location,
5697                                      bool is_add)
5698 {
5699         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5700         struct hlist_node *node2;
5701
5702         if (is_add && !new_rule)
5703                 return -EINVAL;
5704
5705         hlist_for_each_entry_safe(rule, node2,
5706                                   &hdev->fd_rule_list, rule_node) {
5707                 if (rule->location >= location)
5708                         break;
5709                 parent = rule;
5710         }
5711
5712         if (rule && rule->location == location) {
5713                 hlist_del(&rule->rule_node);
5714                 kfree(rule);
5715                 hdev->hclge_fd_rule_num--;
5716
5717                 if (!is_add) {
5718                         if (!hdev->hclge_fd_rule_num)
5719                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5720                         clear_bit(location, hdev->fd_bmap);
5721
5722                         return 0;
5723                 }
5724         } else if (!is_add) {
5725                 dev_err(&hdev->pdev->dev,
5726                         "failed to delete, rule %u does not exist\n",
5727                         location);
5728                 return -EINVAL;
5729         }
5730
5731         INIT_HLIST_NODE(&new_rule->rule_node);
5732
5733         if (parent)
5734                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5735         else
5736                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5737
5738         set_bit(location, hdev->fd_bmap);
5739         hdev->hclge_fd_rule_num++;
5740         hdev->fd_active_type = new_rule->rule_type;
5741
5742         return 0;
5743 }
5744
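     /* convert an ethtool flow spec into the driver's hclge_fd_rule tuple
      * representation (values and masks in host byte order)
      */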
5745 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5746                               struct ethtool_rx_flow_spec *fs,
5747                               struct hclge_fd_rule *rule)
5748 {
5749         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5750
5751         switch (flow_type) {
5752         case SCTP_V4_FLOW:
5753         case TCP_V4_FLOW:
5754         case UDP_V4_FLOW:
5755                 rule->tuples.src_ip[IPV4_INDEX] =
5756                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5757                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5758                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5759
5760                 rule->tuples.dst_ip[IPV4_INDEX] =
5761                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5762                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5763                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5764
5765                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5766                 rule->tuples_mask.src_port =
5767                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5768
5769                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5770                 rule->tuples_mask.dst_port =
5771                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5772
5773                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5774                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5775
5776                 rule->tuples.ether_proto = ETH_P_IP;
5777                 rule->tuples_mask.ether_proto = 0xFFFF;
5778
5779                 break;
5780         case IP_USER_FLOW:
5781                 rule->tuples.src_ip[IPV4_INDEX] =
5782                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5783                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5784                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5785
5786                 rule->tuples.dst_ip[IPV4_INDEX] =
5787                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5788                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5789                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5790
5791                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5792                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5793
5794                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5795                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5796
5797                 rule->tuples.ether_proto = ETH_P_IP;
5798                 rule->tuples_mask.ether_proto = 0xFFFF;
5799
5800                 break;
5801         case SCTP_V6_FLOW:
5802         case TCP_V6_FLOW:
5803         case UDP_V6_FLOW:
5804                 be32_to_cpu_array(rule->tuples.src_ip,
5805                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5806                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5807                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5808
5809                 be32_to_cpu_array(rule->tuples.dst_ip,
5810                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5811                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5812                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5813
5814                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5815                 rule->tuples_mask.src_port =
5816                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5817
5818                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5819                 rule->tuples_mask.dst_port =
5820                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5821
5822                 rule->tuples.ether_proto = ETH_P_IPV6;
5823                 rule->tuples_mask.ether_proto = 0xFFFF;
5824
5825                 break;
5826         case IPV6_USER_FLOW:
5827                 be32_to_cpu_array(rule->tuples.src_ip,
5828                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5829                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5830                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5831
5832                 be32_to_cpu_array(rule->tuples.dst_ip,
5833                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5834                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5835                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5836
5837                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5838                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5839
5840                 rule->tuples.ether_proto = ETH_P_IPV6;
5841                 rule->tuples_mask.ether_proto = 0xFFFF;
5842
5843                 break;
5844         case ETHER_FLOW:
5845                 ether_addr_copy(rule->tuples.src_mac,
5846                                 fs->h_u.ether_spec.h_source);
5847                 ether_addr_copy(rule->tuples_mask.src_mac,
5848                                 fs->m_u.ether_spec.h_source);
5849
5850                 ether_addr_copy(rule->tuples.dst_mac,
5851                                 fs->h_u.ether_spec.h_dest);
5852                 ether_addr_copy(rule->tuples_mask.dst_mac,
5853                                 fs->m_u.ether_spec.h_dest);
5854
5855                 rule->tuples.ether_proto =
5856                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5857                 rule->tuples_mask.ether_proto =
5858                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5859
5860                 break;
5861         default:
5862                 return -EOPNOTSUPP;
5863         }
5864
5865         switch (flow_type) {
5866         case SCTP_V4_FLOW:
5867         case SCTP_V6_FLOW:
5868                 rule->tuples.ip_proto = IPPROTO_SCTP;
5869                 rule->tuples_mask.ip_proto = 0xFF;
5870                 break;
5871         case TCP_V4_FLOW:
5872         case TCP_V6_FLOW:
5873                 rule->tuples.ip_proto = IPPROTO_TCP;
5874                 rule->tuples_mask.ip_proto = 0xFF;
5875                 break;
5876         case UDP_V4_FLOW:
5877         case UDP_V6_FLOW:
5878                 rule->tuples.ip_proto = IPPROTO_UDP;
5879                 rule->tuples_mask.ip_proto = 0xFF;
5880                 break;
5881         default:
5882                 break;
5883         }
5884
5885         if (fs->flow_type & FLOW_EXT) {
5886                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5887                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5888         }
5889
5890         if (fs->flow_type & FLOW_MAC_EXT) {
5891                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5892                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5893         }
5894
5895         return 0;
5896 }
5897
5898 /* the caller must hold fd_rule_lock when calling this function */
5899 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5900                                 struct hclge_fd_rule *rule)
5901 {
5902         int ret;
5903
5904         if (!rule) {
5905                 dev_err(&hdev->pdev->dev,
5906                         "The flow director rule is NULL\n");
5907                 return -EINVAL;
5908         }
5909
5910         /* this never fails here, so there is no need to check the return value */
5911         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5912
5913         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5914         if (ret)
5915                 goto clear_rule;
5916
5917         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5918         if (ret)
5919                 goto clear_rule;
5920
5921         return 0;
5922
5923 clear_rule:
5924         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5925         return ret;
5926 }
5927
5928 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5929 {
5930         struct hclge_vport *vport = hclge_get_vport(handle);
5931         struct hclge_dev *hdev = vport->back;
5932
5933         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5934 }
5935
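     /* add a flow director rule configured by the user via ethtool */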
5936 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5937                               struct ethtool_rxnfc *cmd)
5938 {
5939         struct hclge_vport *vport = hclge_get_vport(handle);
5940         struct hclge_dev *hdev = vport->back;
5941         u16 dst_vport_id = 0, q_index = 0;
5942         struct ethtool_rx_flow_spec *fs;
5943         struct hclge_fd_rule *rule;
5944         u32 unused = 0;
5945         u8 action;
5946         int ret;
5947
5948         if (!hnae3_dev_fd_supported(hdev)) {
5949                 dev_err(&hdev->pdev->dev,
5950                         "flow director is not supported\n");
5951                 return -EOPNOTSUPP;
5952         }
5953
5954         if (!hdev->fd_en) {
5955                 dev_err(&hdev->pdev->dev,
5956                         "please enable flow director first\n");
5957                 return -EOPNOTSUPP;
5958         }
5959
5960         if (hclge_is_cls_flower_active(handle)) {
5961                 dev_err(&hdev->pdev->dev,
5962                         "please delete all existing cls flower rules first\n");
5963                 return -EINVAL;
5964         }
5965
5966         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5967
5968         ret = hclge_fd_check_spec(hdev, fs, &unused);
5969         if (ret)
5970                 return ret;
5971
5972         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5973                 action = HCLGE_FD_ACTION_DROP_PACKET;
5974         } else {
5975                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5976                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5977                 u16 tqps;
5978
5979                 if (vf > hdev->num_req_vfs) {
5980                         dev_err(&hdev->pdev->dev,
5981                                 "Error: vf id (%u) > max vf num (%u)\n",
5982                                 vf, hdev->num_req_vfs);
5983                         return -EINVAL;
5984                 }
5985
5986                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5987                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5988
5989                 if (ring >= tqps) {
5990                         dev_err(&hdev->pdev->dev,
5991                                 "Error: queue id (%u) > max queue id (%u)\n",
5992                                 ring, tqps - 1);
5993                         return -EINVAL;
5994                 }
5995
5996                 action = HCLGE_FD_ACTION_SELECT_QUEUE;
5997                 q_index = ring;
5998         }
5999
6000         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6001         if (!rule)
6002                 return -ENOMEM;
6003
6004         ret = hclge_fd_get_tuple(hdev, fs, rule);
6005         if (ret) {
6006                 kfree(rule);
6007                 return ret;
6008         }
6009
6010         rule->flow_type = fs->flow_type;
6011         rule->location = fs->location;
6012         rule->unused_tuple = unused;
6013         rule->vf_id = dst_vport_id;
6014         rule->queue_id = q_index;
6015         rule->action = action;
6016         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6017
6018         /* to avoid rule conflicts, we need to clear all arfs rules when
6019          * the user configures a rule via ethtool
6020          */
6021         spin_lock_bh(&hdev->fd_rule_lock);
6022         hclge_clear_arfs_rules(handle);
6023
6024         ret = hclge_fd_config_rule(hdev, rule);
6025
6026         spin_unlock_bh(&hdev->fd_rule_lock);
6027
6028         return ret;
6029 }
6030
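     /* delete a flow director rule specified by the user via ethtool */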
6031 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6032                               struct ethtool_rxnfc *cmd)
6033 {
6034         struct hclge_vport *vport = hclge_get_vport(handle);
6035         struct hclge_dev *hdev = vport->back;
6036         struct ethtool_rx_flow_spec *fs;
6037         int ret;
6038
6039         if (!hnae3_dev_fd_supported(hdev))
6040                 return -EOPNOTSUPP;
6041
6042         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6043
6044         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6045                 return -EINVAL;
6046
6047         if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6048             !hclge_fd_rule_exist(hdev, fs->location)) {
6049                 dev_err(&hdev->pdev->dev,
6050                         "failed to delete, rule %u does not exist\n", fs->location);
6051                 return -ENOENT;
6052         }
6053
6054         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6055                                    NULL, false);
6056         if (ret)
6057                 return ret;
6058
6059         spin_lock_bh(&hdev->fd_rule_lock);
6060         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6061
6062         spin_unlock_bh(&hdev->fd_rule_lock);
6063
6064         return ret;
6065 }
6066
6067 /* the caller must hold fd_rule_lock when calling this function */
6068 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6069                                      bool clear_list)
6070 {
6071         struct hclge_vport *vport = hclge_get_vport(handle);
6072         struct hclge_dev *hdev = vport->back;
6073         struct hclge_fd_rule *rule;
6074         struct hlist_node *node;
6075         u16 location;
6076
6077         if (!hnae3_dev_fd_supported(hdev))
6078                 return;
6079
6080         for_each_set_bit(location, hdev->fd_bmap,
6081                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6082                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6083                                      NULL, false);
6084
6085         if (clear_list) {
6086                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6087                                           rule_node) {
6088                         hlist_del(&rule->rule_node);
6089                         kfree(rule);
6090                 }
6091                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6092                 hdev->hclge_fd_rule_num = 0;
6093                 bitmap_zero(hdev->fd_bmap,
6094                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6095         }
6096 }
6097
6098 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6099 {
6100         struct hclge_vport *vport = hclge_get_vport(handle);
6101         struct hclge_dev *hdev = vport->back;
6102         struct hclge_fd_rule *rule;
6103         struct hlist_node *node;
6104         int ret;
6105
6106         /* Return ok here, because reset error handling will check this
6107          * return value. If an error is returned here, the reset process will
6108          * fail.
6109          */
6110         if (!hnae3_dev_fd_supported(hdev))
6111                 return 0;
6112
6113         /* if fd is disabled, the rules should not be restored during reset */
6114         if (!hdev->fd_en)
6115                 return 0;
6116
6117         spin_lock_bh(&hdev->fd_rule_lock);
6118         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6119                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6120                 if (!ret)
6121                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6122
6123                 if (ret) {
6124                         dev_warn(&hdev->pdev->dev,
6125                                  "Restore rule %u failed, remove it\n",
6126                                  rule->location);
6127                         clear_bit(rule->location, hdev->fd_bmap);
6128                         hlist_del(&rule->rule_node);
6129                         kfree(rule);
6130                         hdev->hclge_fd_rule_num--;
6131                 }
6132         }
6133
6134         if (hdev->hclge_fd_rule_num)
6135                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6136
6137         spin_unlock_bh(&hdev->fd_rule_lock);
6138
6139         return 0;
6140 }
6141
6142 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6143                                  struct ethtool_rxnfc *cmd)
6144 {
6145         struct hclge_vport *vport = hclge_get_vport(handle);
6146         struct hclge_dev *hdev = vport->back;
6147
6148         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6149                 return -EOPNOTSUPP;
6150
6151         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6152         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6153
6154         return 0;
6155 }
6156
6157 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6158                                      struct ethtool_tcpip4_spec *spec,
6159                                      struct ethtool_tcpip4_spec *spec_mask)
6160 {
6161         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6162         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6163                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6164
6165         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6166         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6167                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6168
6169         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6170         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6171                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6172
6173         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6174         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6175                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6176
6177         spec->tos = rule->tuples.ip_tos;
6178         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6179                         0 : rule->tuples_mask.ip_tos;
6180 }
6181
6182 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6183                                   struct ethtool_usrip4_spec *spec,
6184                                   struct ethtool_usrip4_spec *spec_mask)
6185 {
6186         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6187         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6188                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6189
6190         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6191         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6192                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6193
6194         spec->tos = rule->tuples.ip_tos;
6195         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6196                         0 : rule->tuples_mask.ip_tos;
6197
6198         spec->proto = rule->tuples.ip_proto;
6199         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6200                         0 : rule->tuples_mask.ip_proto;
6201
6202         spec->ip_ver = ETH_RX_NFC_IP4;
6203 }
6204
6205 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6206                                      struct ethtool_tcpip6_spec *spec,
6207                                      struct ethtool_tcpip6_spec *spec_mask)
6208 {
6209         cpu_to_be32_array(spec->ip6src,
6210                           rule->tuples.src_ip, IPV6_SIZE);
6211         cpu_to_be32_array(spec->ip6dst,
6212                           rule->tuples.dst_ip, IPV6_SIZE);
6213         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6214                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6215         else
6216                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6217                                   IPV6_SIZE);
6218
6219         if (rule->unused_tuple & BIT(INNER_DST_IP))
6220                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6221         else
6222                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6223                                   IPV6_SIZE);
6224
6225         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6226         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6227                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6228
6229         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6230         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6231                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6232 }
6233
6234 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6235                                   struct ethtool_usrip6_spec *spec,
6236                                   struct ethtool_usrip6_spec *spec_mask)
6237 {
6238         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6239         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6240         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6241                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6242         else
6243                 cpu_to_be32_array(spec_mask->ip6src,
6244                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6245
6246         if (rule->unused_tuple & BIT(INNER_DST_IP))
6247                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6248         else
6249                 cpu_to_be32_array(spec_mask->ip6dst,
6250                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6251
6252         spec->l4_proto = rule->tuples.ip_proto;
6253         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6254                         0 : rule->tuples_mask.ip_proto;
6255 }
6256
6257 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6258                                     struct ethhdr *spec,
6259                                     struct ethhdr *spec_mask)
6260 {
6261         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6262         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6263
6264         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6265                 eth_zero_addr(spec_mask->h_source);
6266         else
6267                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6268
6269         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6270                 eth_zero_addr(spec_mask->h_dest);
6271         else
6272                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6273
6274         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6275         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6276                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6277 }
6278
6279 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6280                                   struct hclge_fd_rule *rule)
6281 {
6282         if (fs->flow_type & FLOW_EXT) {
6283                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6284                 fs->m_ext.vlan_tci =
6285                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6286                                 cpu_to_be16(VLAN_VID_MASK) :
6287                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6288         }
6289
6290         if (fs->flow_type & FLOW_MAC_EXT) {
6291                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6292                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6293                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6294                 else
6295                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6296                                         rule->tuples_mask.dst_mac);
6297         }
6298 }
6299
6300 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6301                                   struct ethtool_rxnfc *cmd)
6302 {
6303         struct hclge_vport *vport = hclge_get_vport(handle);
6304         struct hclge_fd_rule *rule = NULL;
6305         struct hclge_dev *hdev = vport->back;
6306         struct ethtool_rx_flow_spec *fs;
6307         struct hlist_node *node2;
6308
6309         if (!hnae3_dev_fd_supported(hdev))
6310                 return -EOPNOTSUPP;
6311
6312         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6313
6314         spin_lock_bh(&hdev->fd_rule_lock);
6315
6316         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6317                 if (rule->location >= fs->location)
6318                         break;
6319         }
6320
6321         if (!rule || fs->location != rule->location) {
6322                 spin_unlock_bh(&hdev->fd_rule_lock);
6323
6324                 return -ENOENT;
6325         }
6326
6327         fs->flow_type = rule->flow_type;
6328         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6329         case SCTP_V4_FLOW:
6330         case TCP_V4_FLOW:
6331         case UDP_V4_FLOW:
6332                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6333                                          &fs->m_u.tcp_ip4_spec);
6334                 break;
6335         case IP_USER_FLOW:
6336                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6337                                       &fs->m_u.usr_ip4_spec);
6338                 break;
6339         case SCTP_V6_FLOW:
6340         case TCP_V6_FLOW:
6341         case UDP_V6_FLOW:
6342                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6343                                          &fs->m_u.tcp_ip6_spec);
6344                 break;
6345         case IPV6_USER_FLOW:
6346                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6347                                       &fs->m_u.usr_ip6_spec);
6348                 break;
6349         /* The flow type of the fd rule has been checked before adding it to
6350          * the rule list. As all other flow types have been handled above, it
6351          * must be ETHER_FLOW for the default case
6352          */
6353         default:
6354                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6355                                         &fs->m_u.ether_spec);
6356                 break;
6357         }
6358
6359         hclge_fd_get_ext_info(fs, rule);
6360
6361         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6362                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6363         } else {
6364                 u64 vf_id;
6365
6366                 fs->ring_cookie = rule->queue_id;
6367                 vf_id = rule->vf_id;
6368                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6369                 fs->ring_cookie |= vf_id;
6370         }
6371
6372         spin_unlock_bh(&hdev->fd_rule_lock);
6373
6374         return 0;
6375 }
6376
6377 static int hclge_get_all_rules(struct hnae3_handle *handle,
6378                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6379 {
6380         struct hclge_vport *vport = hclge_get_vport(handle);
6381         struct hclge_dev *hdev = vport->back;
6382         struct hclge_fd_rule *rule;
6383         struct hlist_node *node2;
6384         int cnt = 0;
6385
6386         if (!hnae3_dev_fd_supported(hdev))
6387                 return -EOPNOTSUPP;
6388
6389         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6390
6391         spin_lock_bh(&hdev->fd_rule_lock);
6392         hlist_for_each_entry_safe(rule, node2,
6393                                   &hdev->fd_rule_list, rule_node) {
6394                 if (cnt == cmd->rule_cnt) {
6395                         spin_unlock_bh(&hdev->fd_rule_lock);
6396                         return -EMSGSIZE;
6397                 }
6398
6399                 rule_locs[cnt] = rule->location;
6400                 cnt++;
6401         }
6402
6403         spin_unlock_bh(&hdev->fd_rule_lock);
6404
6405         cmd->rule_cnt = cnt;
6406
6407         return 0;
6408 }
6409
6410 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6411                                      struct hclge_fd_rule_tuples *tuples)
6412 {
6413 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6414 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6415
6416         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6417         tuples->ip_proto = fkeys->basic.ip_proto;
6418         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6419
6420         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6421                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6422                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6423         } else {
6424                 int i;
6425
6426                 for (i = 0; i < IPV6_SIZE; i++) {
6427                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6428                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6429                 }
6430         }
6431 }
6432
6433 /* traverse all rules, check whether an existing rule has the same tuples */
6434 static struct hclge_fd_rule *
6435 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6436                           const struct hclge_fd_rule_tuples *tuples)
6437 {
6438         struct hclge_fd_rule *rule = NULL;
6439         struct hlist_node *node;
6440
6441         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6442                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6443                         return rule;
6444         }
6445
6446         return NULL;
6447 }
6448
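     /* build an hclge_fd_rule for an aRFS flow: the MAC, VLAN, IP TOS and
      * source port tuples are left unused, and the flow type is derived
      * from the extracted ether/ip protocol
      */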
6449 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6450                                      struct hclge_fd_rule *rule)
6451 {
6452         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6453                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6454                              BIT(INNER_SRC_PORT);
6455         rule->action = 0;
6456         rule->vf_id = 0;
6457         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6458         if (tuples->ether_proto == ETH_P_IP) {
6459                 if (tuples->ip_proto == IPPROTO_TCP)
6460                         rule->flow_type = TCP_V4_FLOW;
6461                 else
6462                         rule->flow_type = UDP_V4_FLOW;
6463         } else {
6464                 if (tuples->ip_proto == IPPROTO_TCP)
6465                         rule->flow_type = TCP_V6_FLOW;
6466                 else
6467                         rule->flow_type = UDP_V6_FLOW;
6468         }
6469         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6470         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6471 }
6472
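     /* add or update a flow director rule for an aRFS flow, steering it to
      * @queue_id; returns the rule location on success
      */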
6473 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6474                                       u16 flow_id, struct flow_keys *fkeys)
6475 {
6476         struct hclge_vport *vport = hclge_get_vport(handle);
6477         struct hclge_fd_rule_tuples new_tuples = {};
6478         struct hclge_dev *hdev = vport->back;
6479         struct hclge_fd_rule *rule;
6480         u16 tmp_queue_id;
6481         u16 bit_id;
6482         int ret;
6483
6484         if (!hnae3_dev_fd_supported(hdev))
6485                 return -EOPNOTSUPP;
6486
6487         /* arfs should not work when there are already fd rules
6488          * added by the user
6489          */
6490         spin_lock_bh(&hdev->fd_rule_lock);
6491         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6492             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6493                 spin_unlock_bh(&hdev->fd_rule_lock);
6494                 return -EOPNOTSUPP;
6495         }
6496
6497         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6498
6499         /* check whether a flow director filter already exists for this flow:
6500          * if not, create a new filter for it;
6501          * if a filter exists with a different queue id, modify the filter;
6502          * if a filter exists with the same queue id, do nothing
6503          */
6504         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6505         if (!rule) {
6506                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6507                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6508                         spin_unlock_bh(&hdev->fd_rule_lock);
6509                         return -ENOSPC;
6510                 }
6511
6512                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6513                 if (!rule) {
6514                         spin_unlock_bh(&hdev->fd_rule_lock);
6515                         return -ENOMEM;
6516                 }
6517
6518                 set_bit(bit_id, hdev->fd_bmap);
6519                 rule->location = bit_id;
6520                 rule->arfs.flow_id = flow_id;
6521                 rule->queue_id = queue_id;
6522                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6523                 ret = hclge_fd_config_rule(hdev, rule);
6524
6525                 spin_unlock_bh(&hdev->fd_rule_lock);
6526
6527                 if (ret)
6528                         return ret;
6529
6530                 return rule->location;
6531         }
6532
6533         spin_unlock_bh(&hdev->fd_rule_lock);
6534
6535         if (rule->queue_id == queue_id)
6536                 return rule->location;
6537
6538         tmp_queue_id = rule->queue_id;
6539         rule->queue_id = queue_id;
6540         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6541         if (ret) {
6542                 rule->queue_id = tmp_queue_id;
6543                 return ret;
6544         }
6545
6546         return rule->location;
6547 }
6548
6549 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6550 {
6551 #ifdef CONFIG_RFS_ACCEL
6552         struct hnae3_handle *handle = &hdev->vport[0].nic;
6553         struct hclge_fd_rule *rule;
6554         struct hlist_node *node;
6555         HLIST_HEAD(del_list);
6556
6557         spin_lock_bh(&hdev->fd_rule_lock);
6558         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6559                 spin_unlock_bh(&hdev->fd_rule_lock);
6560                 return;
6561         }
6562         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6563                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6564                                         rule->arfs.flow_id, rule->location)) {
6565                         hlist_del_init(&rule->rule_node);
6566                         hlist_add_head(&rule->rule_node, &del_list);
6567                         hdev->hclge_fd_rule_num--;
6568                         clear_bit(rule->location, hdev->fd_bmap);
6569                 }
6570         }
6571         spin_unlock_bh(&hdev->fd_rule_lock);
6572
6573         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6574                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6575                                      rule->location, NULL, false);
6576                 kfree(rule);
6577         }
6578 #endif
6579 }
6580
6581 /* the caller must hold fd_rule_lock when calling this function */
6582 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6583 {
6584 #ifdef CONFIG_RFS_ACCEL
6585         struct hclge_vport *vport = hclge_get_vport(handle);
6586         struct hclge_dev *hdev = vport->back;
6587
6588         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6589                 hclge_del_all_fd_entries(handle, true);
6590 #endif
6591 }
6592
6593 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6594                                     struct hclge_fd_rule *rule)
6595 {
6596         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6597                 struct flow_match_basic match;
6598                 u16 ethtype_key, ethtype_mask;
6599
6600                 flow_rule_match_basic(flow, &match);
6601                 ethtype_key = ntohs(match.key->n_proto);
6602                 ethtype_mask = ntohs(match.mask->n_proto);
6603
6604                 if (ethtype_key == ETH_P_ALL) {
6605                         ethtype_key = 0;
6606                         ethtype_mask = 0;
6607                 }
6608                 rule->tuples.ether_proto = ethtype_key;
6609                 rule->tuples_mask.ether_proto = ethtype_mask;
6610                 rule->tuples.ip_proto = match.key->ip_proto;
6611                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6612         } else {
6613                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6614                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6615         }
6616 }
6617
6618 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6619                                   struct hclge_fd_rule *rule)
6620 {
6621         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6622                 struct flow_match_eth_addrs match;
6623
6624                 flow_rule_match_eth_addrs(flow, &match);
6625                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6626                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6627                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6628                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6629         } else {
6630                 rule->unused_tuple |= BIT(INNER_DST_MAC);
6631                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6632         }
6633 }
6634
6635 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6636                                    struct hclge_fd_rule *rule)
6637 {
6638         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6639                 struct flow_match_vlan match;
6640
6641                 flow_rule_match_vlan(flow, &match);
6642                 rule->tuples.vlan_tag1 = match.key->vlan_id |
6643                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6644                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6645                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6646         } else {
6647                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6648         }
6649 }
6650
6651 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6652                                  struct hclge_fd_rule *rule)
6653 {
6654         u16 addr_type = 0;
6655
6656         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6657                 struct flow_match_control match;
6658
6659                 flow_rule_match_control(flow, &match);
6660                 addr_type = match.key->addr_type;
6661         }
6662
6663         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6664                 struct flow_match_ipv4_addrs match;
6665
6666                 flow_rule_match_ipv4_addrs(flow, &match);
6667                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6668                 rule->tuples_mask.src_ip[IPV4_INDEX] =
6669                                                 be32_to_cpu(match.mask->src);
6670                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6671                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6672                                                 be32_to_cpu(match.mask->dst);
6673         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6674                 struct flow_match_ipv6_addrs match;
6675
6676                 flow_rule_match_ipv6_addrs(flow, &match);
6677                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6678                                   IPV6_SIZE);
6679                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6680                                   match.mask->src.s6_addr32, IPV6_SIZE);
6681                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6682                                   IPV6_SIZE);
6683                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6684                                   match.mask->dst.s6_addr32, IPV6_SIZE);
6685         } else {
6686                 rule->unused_tuple |= BIT(INNER_SRC_IP);
6687                 rule->unused_tuple |= BIT(INNER_DST_IP);
6688         }
6689 }
6690
6691 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6692                                    struct hclge_fd_rule *rule)
6693 {
6694         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6695                 struct flow_match_ports match;
6696
6697                 flow_rule_match_ports(flow, &match);
6698
6699                 rule->tuples.src_port = be16_to_cpu(match.key->src);
6700                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6701                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6702                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6703         } else {
6704                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6705                 rule->unused_tuple |= BIT(INNER_DST_PORT);
6706         }
6707 }
6708
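     /* translate a tc cls_flower match into the driver's fd rule tuples;
      * only the control, basic, eth addrs, vlan, ipv4/ipv6 addrs and ports
      * keys are supported
      */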
6709 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6710                                   struct flow_cls_offload *cls_flower,
6711                                   struct hclge_fd_rule *rule)
6712 {
6713         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6714         struct flow_dissector *dissector = flow->match.dissector;
6715
6716         if (dissector->used_keys &
6717             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6718               BIT(FLOW_DISSECTOR_KEY_BASIC) |
6719               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6720               BIT(FLOW_DISSECTOR_KEY_VLAN) |
6721               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6722               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6723               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6724                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6725                         dissector->used_keys);
6726                 return -EOPNOTSUPP;
6727         }
6728
6729         hclge_get_cls_key_basic(flow, rule);
6730         hclge_get_cls_key_mac(flow, rule);
6731         hclge_get_cls_key_vlan(flow, rule);
6732         hclge_get_cls_key_ip(flow, rule);
6733         hclge_get_cls_key_port(flow, rule);
6734
6735         return 0;
6736 }
6737
6738 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6739                                   struct flow_cls_offload *cls_flower, int tc)
6740 {
6741         u32 prio = cls_flower->common.prio;
6742
6743         if (tc < 0 || tc > hdev->tc_max) {
6744                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6745                 return -EINVAL;
6746         }
6747
6748         if (prio == 0 ||
6749             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6750                 dev_err(&hdev->pdev->dev,
6751                         "prio %u should be in range [1, %u]\n",
6752                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6753                 return -EINVAL;
6754         }
6755
6756         if (test_bit(prio - 1, hdev->fd_bmap)) {
6757                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6758                 return -EINVAL;
6759         }
6760         return 0;
6761 }
6762
6763 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6764                                 struct flow_cls_offload *cls_flower,
6765                                 int tc)
6766 {
6767         struct hclge_vport *vport = hclge_get_vport(handle);
6768         struct hclge_dev *hdev = vport->back;
6769         struct hclge_fd_rule *rule;
6770         int ret;
6771
6772         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6773                 dev_err(&hdev->pdev->dev,
6774                         "please remove all existing fd rules via ethtool first\n");
6775                 return -EINVAL;
6776         }
6777
6778         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6779         if (ret) {
6780                 dev_err(&hdev->pdev->dev,
6781                         "failed to check cls flower params, ret = %d\n", ret);
6782                 return ret;
6783         }
6784
6785         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6786         if (!rule)
6787                 return -ENOMEM;
6788
6789         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6790         if (ret)
6791                 goto err;
6792
6793         rule->action = HCLGE_FD_ACTION_SELECT_TC;
6794         rule->cls_flower.tc = tc;
6795         rule->location = cls_flower->common.prio - 1;
6796         rule->vf_id = 0;
6797         rule->cls_flower.cookie = cls_flower->cookie;
6798         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6799
6800         spin_lock_bh(&hdev->fd_rule_lock);
6801         hclge_clear_arfs_rules(handle);
6802
6803         ret = hclge_fd_config_rule(hdev, rule);
6804
6805         spin_unlock_bh(&hdev->fd_rule_lock);
6806
6807         if (ret) {
6808                 dev_err(&hdev->pdev->dev,
6809                         "failed to add cls flower rule, ret = %d\n", ret);
6810                 goto err;
6811         }
6812
6813         return 0;
6814 err:
6815         kfree(rule);
6816         return ret;
6817 }
6818
6819 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6820                                                    unsigned long cookie)
6821 {
6822         struct hclge_fd_rule *rule;
6823         struct hlist_node *node;
6824
6825         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6826                 if (rule->cls_flower.cookie == cookie)
6827                         return rule;
6828         }
6829
6830         return NULL;
6831 }
6832
6833 static int hclge_del_cls_flower(struct hnae3_handle *handle,
6834                                 struct flow_cls_offload *cls_flower)
6835 {
6836         struct hclge_vport *vport = hclge_get_vport(handle);
6837         struct hclge_dev *hdev = vport->back;
6838         struct hclge_fd_rule *rule;
6839         int ret;
6840
6841         spin_lock_bh(&hdev->fd_rule_lock);
6842
6843         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
6844         if (!rule) {
6845                 spin_unlock_bh(&hdev->fd_rule_lock);
6846                 return -EINVAL;
6847         }
6848
6849         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6850                                    NULL, false);
6851         if (ret) {
6852                 dev_err(&hdev->pdev->dev,
6853                         "failed to delete cls flower rule %u, ret = %d\n",
6854                         rule->location, ret);
6855                 spin_unlock_bh(&hdev->fd_rule_lock);
6856                 return ret;
6857         }
6858
6859         ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6860         if (ret) {
6861                 dev_err(&hdev->pdev->dev,
6862                         "failed to delete cls flower rule %u in list, ret = %d\n",
6863                         rule->location, ret);
6864                 spin_unlock_bh(&hdev->fd_rule_lock);
6865                 return ret;
6866         }
6867
6868         spin_unlock_bh(&hdev->fd_rule_lock);
6869
6870         return 0;
6871 }
6872
6873 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6874 {
6875         struct hclge_vport *vport = hclge_get_vport(handle);
6876         struct hclge_dev *hdev = vport->back;
6877
6878         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6879                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6880 }
6881
6882 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6883 {
6884         struct hclge_vport *vport = hclge_get_vport(handle);
6885         struct hclge_dev *hdev = vport->back;
6886
6887         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6888 }
6889
6890 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6891 {
6892         struct hclge_vport *vport = hclge_get_vport(handle);
6893         struct hclge_dev *hdev = vport->back;
6894
6895         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6896 }
6897
6898 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6899 {
6900         struct hclge_vport *vport = hclge_get_vport(handle);
6901         struct hclge_dev *hdev = vport->back;
6902
6903         return hdev->rst_stats.hw_reset_done_cnt;
6904 }
6905
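     /* enable or disable the flow director: on disable, remove the rules
      * from hardware (and drop aRFS rules from the list); on enable,
      * restore the remembered rules to hardware
      */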
6906 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6907 {
6908         struct hclge_vport *vport = hclge_get_vport(handle);
6909         struct hclge_dev *hdev = vport->back;
6910         bool clear;
6911
6912         hdev->fd_en = enable;
6913         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6914
6915         if (!enable) {
6916                 spin_lock_bh(&hdev->fd_rule_lock);
6917                 hclge_del_all_fd_entries(handle, clear);
6918                 spin_unlock_bh(&hdev->fd_rule_lock);
6919         } else {
6920                 hclge_restore_fd_entries(handle);
6921         }
6922 }
6923
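/* Enable or disable MAC TX/RX. When enabling, the padding, FCS,
 * oversize-truncation and under-minimum-error bits are set as well;
 * when disabling, loop_en stays zero, so all of them are cleared.
 */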
6924 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6925 {
6926         struct hclge_desc desc;
6927         struct hclge_config_mac_mode_cmd *req =
6928                 (struct hclge_config_mac_mode_cmd *)desc.data;
6929         u32 loop_en = 0;
6930         int ret;
6931
6932         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6933
6934         if (enable) {
6935                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6936                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6937                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6938                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6939                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6940                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6941                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6942                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6943                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6944                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6945         }
6946
6947         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6948
6949         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6950         if (ret)
6951                 dev_err(&hdev->pdev->dev,
6952                         "mac enable fail, ret =%d.\n", ret);
6953 }
6954
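/* Read-modify-write of the mac vlan switch parameter for a function: read
 * the current value from firmware, merge in @switch_param under @param_mask,
 * and write the result back.
 */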
6955 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6956                                      u8 switch_param, u8 param_mask)
6957 {
6958         struct hclge_mac_vlan_switch_cmd *req;
6959         struct hclge_desc desc;
6960         u32 func_id;
6961         int ret;
6962
6963         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6964         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6965
6966         /* read current config parameter */
6967         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6968                                    true);
6969         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6970         req->func_id = cpu_to_le32(func_id);
6971
6972         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6973         if (ret) {
6974                 dev_err(&hdev->pdev->dev,
6975                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6976                 return ret;
6977         }
6978
6979         /* modify and write new config parameter */
6980         hclge_cmd_reuse_desc(&desc, false);
6981         req->switch_param = (req->switch_param & param_mask) | switch_param;
6982         req->param_mask = param_mask;
6983
6984         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6985         if (ret)
6986                 dev_err(&hdev->pdev->dev,
6987                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6988         return ret;
6989 }
6990
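/* Poll the phy until its link state matches @link_ret, giving up after
 * HCLGE_PHY_LINK_STATUS_NUM polls spaced HCLGE_LINK_STATUS_MS apart.
 */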
6991 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6992                                        int link_ret)
6993 {
6994 #define HCLGE_PHY_LINK_STATUS_NUM  200
6995
6996         struct phy_device *phydev = hdev->hw.mac.phydev;
6997         int i = 0;
6998         int ret;
6999
7000         do {
7001                 ret = phy_read_status(phydev);
7002                 if (ret) {
7003                         dev_err(&hdev->pdev->dev,
7004                                 "phy update link status fail, ret = %d\n", ret);
7005                         return;
7006                 }
7007
7008                 if (phydev->link == link_ret)
7009                         break;
7010
7011                 msleep(HCLGE_LINK_STATUS_MS);
7012         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7013 }
7014
7015 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7016 {
7017 #define HCLGE_MAC_LINK_STATUS_NUM  100
7018
7019         int link_status;
7020         int i = 0;
7021         int ret;
7022
7023         do {
7024                 ret = hclge_get_mac_link_status(hdev, &link_status);
7025                 if (ret)
7026                         return ret;
7027                 if (link_status == link_ret)
7028                         return 0;
7029
7030                 msleep(HCLGE_LINK_STATUS_MS);
7031         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7032         return -EBUSY;
7033 }
7034
7035 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7036                                           bool is_phy)
7037 {
7038         int link_ret;
7039
7040         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7041
7042         if (is_phy)
7043                 hclge_phy_link_status_wait(hdev, link_ret);
7044
7045         return hclge_mac_link_status_wait(hdev, link_ret);
7046 }
7047
7048 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7049 {
7050         struct hclge_config_mac_mode_cmd *req;
7051         struct hclge_desc desc;
7052         u32 loop_en;
7053         int ret;
7054
7055         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7056         /* 1 Read out the MAC mode config at first */
7057         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7058         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7059         if (ret) {
7060                 dev_err(&hdev->pdev->dev,
7061                         "mac loopback get fail, ret =%d.\n", ret);
7062                 return ret;
7063         }
7064
7065         /* 2 Then setup the loopback flag */
7066         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7067         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7068
7069         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7070
7071         /* 3 Config mac work mode with loopback flag
7072          * and its original configure parameters
7073          */
7074         hclge_cmd_reuse_desc(&desc, false);
7075         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7076         if (ret)
7077                 dev_err(&hdev->pdev->dev,
7078                         "mac loopback set fail, ret =%d.\n", ret);
7079         return ret;
7080 }
7081
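/* Configure serial or parallel serdes loopback through firmware, then poll
 * the command result until the DONE bit is reported; return -EBUSY on
 * timeout, or -EIO if firmware reports the configuration failed.
 */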
7082 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7083                                      enum hnae3_loop loop_mode)
7084 {
7085 #define HCLGE_SERDES_RETRY_MS   10
7086 #define HCLGE_SERDES_RETRY_NUM  100
7087
7088         struct hclge_serdes_lb_cmd *req;
7089         struct hclge_desc desc;
7090         int ret, i = 0;
7091         u8 loop_mode_b;
7092
7093         req = (struct hclge_serdes_lb_cmd *)desc.data;
7094         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7095
7096         switch (loop_mode) {
7097         case HNAE3_LOOP_SERIAL_SERDES:
7098                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7099                 break;
7100         case HNAE3_LOOP_PARALLEL_SERDES:
7101                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7102                 break;
7103         default:
7104                 dev_err(&hdev->pdev->dev,
7105                         "unsupported serdes loopback mode %d\n", loop_mode);
7106                 return -ENOTSUPP;
7107         }
7108
7109         if (en) {
7110                 req->enable = loop_mode_b;
7111                 req->mask = loop_mode_b;
7112         } else {
7113                 req->mask = loop_mode_b;
7114         }
7115
7116         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7117         if (ret) {
7118                 dev_err(&hdev->pdev->dev,
7119                         "serdes loopback set fail, ret = %d\n", ret);
7120                 return ret;
7121         }
7122
7123         do {
7124                 msleep(HCLGE_SERDES_RETRY_MS);
7125                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7126                                            true);
7127                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7128                 if (ret) {
7129                         dev_err(&hdev->pdev->dev,
7130                                 "serdes loopback get fail, ret = %d\n", ret);
7131                         return ret;
7132                 }
7133         } while (++i < HCLGE_SERDES_RETRY_NUM &&
7134                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
7135
7136         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7137                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7138                 return -EBUSY;
7139         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7140                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7141                 return -EIO;
7142         }
7143         return ret;
7144 }
7145
7146 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7147                                      enum hnae3_loop loop_mode)
7148 {
7149         int ret;
7150
7151         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7152         if (ret)
7153                 return ret;
7154
7155         hclge_cfg_mac_mode(hdev, en);
7156
7157         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7158         if (ret)
7159                 dev_err(&hdev->pdev->dev,
7160                         "serdes loopback config mac mode timeout\n");
7161
7162         return ret;
7163 }
7164
7165 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7166                                      struct phy_device *phydev)
7167 {
7168         int ret;
7169
7170         if (!phydev->suspended) {
7171                 ret = phy_suspend(phydev);
7172                 if (ret)
7173                         return ret;
7174         }
7175
7176         ret = phy_resume(phydev);
7177         if (ret)
7178                 return ret;
7179
7180         return phy_loopback(phydev, true);
7181 }
7182
7183 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7184                                       struct phy_device *phydev)
7185 {
7186         int ret;
7187
7188         ret = phy_loopback(phydev, false);
7189         if (ret)
7190                 return ret;
7191
7192         return phy_suspend(phydev);
7193 }
7194
7195 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7196 {
7197         struct phy_device *phydev = hdev->hw.mac.phydev;
7198         int ret;
7199
7200         if (!phydev)
7201                 return -ENOTSUPP;
7202
7203         if (en)
7204                 ret = hclge_enable_phy_loopback(hdev, phydev);
7205         else
7206                 ret = hclge_disable_phy_loopback(hdev, phydev);
7207         if (ret) {
7208                 dev_err(&hdev->pdev->dev,
7209                         "set phy loopback fail, ret = %d\n", ret);
7210                 return ret;
7211         }
7212
7213         hclge_cfg_mac_mode(hdev, en);
7214
7215         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7216         if (ret)
7217                 dev_err(&hdev->pdev->dev,
7218                         "phy loopback config mac mode timeout\n");
7219
7220         return ret;
7221 }
7222
7223 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7224                             int stream_id, bool enable)
7225 {
7226         struct hclge_desc desc;
7227         struct hclge_cfg_com_tqp_queue_cmd *req =
7228                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7229         int ret;
7230
7231         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7232         req->tqp_id = cpu_to_le16(tqp_id);
7233         req->stream_id = cpu_to_le16(stream_id);
7234         if (enable)
7235                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7236
7237         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7238         if (ret)
7239                 dev_err(&hdev->pdev->dev,
7240                         "Tqp enable fail, status =%d.\n", ret);
7241         return ret;
7242 }
7243
7244 static int hclge_set_loopback(struct hnae3_handle *handle,
7245                               enum hnae3_loop loop_mode, bool en)
7246 {
7247         struct hclge_vport *vport = hclge_get_vport(handle);
7248         struct hnae3_knic_private_info *kinfo;
7249         struct hclge_dev *hdev = vport->back;
7250         int i, ret;
7251
7252         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7253          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7254          * the same, the packets are looped back in the SSU. If SSU loopback
7255          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7256          */
7257         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7258                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7259
7260                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7261                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7262                 if (ret)
7263                         return ret;
7264         }
7265
7266         switch (loop_mode) {
7267         case HNAE3_LOOP_APP:
7268                 ret = hclge_set_app_loopback(hdev, en);
7269                 break;
7270         case HNAE3_LOOP_SERIAL_SERDES:
7271         case HNAE3_LOOP_PARALLEL_SERDES:
7272                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7273                 break;
7274         case HNAE3_LOOP_PHY:
7275                 ret = hclge_set_phy_loopback(hdev, en);
7276                 break;
7277         default:
7278                 ret = -ENOTSUPP;
7279                 dev_err(&hdev->pdev->dev,
7280                         "loop_mode %d is not supported\n", loop_mode);
7281                 break;
7282         }
7283
7284         if (ret)
7285                 return ret;
7286
7287         kinfo = &vport->nic.kinfo;
7288         for (i = 0; i < kinfo->num_tqps; i++) {
7289                 ret = hclge_tqp_enable(hdev, i, 0, en);
7290                 if (ret)
7291                         return ret;
7292         }
7293
7294         return 0;
7295 }
7296
7297 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7298 {
7299         int ret;
7300
7301         ret = hclge_set_app_loopback(hdev, false);
7302         if (ret)
7303                 return ret;
7304
7305         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7306         if (ret)
7307                 return ret;
7308
7309         return hclge_cfg_serdes_loopback(hdev, false,
7310                                          HNAE3_LOOP_PARALLEL_SERDES);
7311 }
7312
7313 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7314 {
7315         struct hclge_vport *vport = hclge_get_vport(handle);
7316         struct hnae3_knic_private_info *kinfo;
7317         struct hnae3_queue *queue;
7318         struct hclge_tqp *tqp;
7319         int i;
7320
7321         kinfo = &vport->nic.kinfo;
7322         for (i = 0; i < kinfo->num_tqps; i++) {
7323                 queue = handle->kinfo.tqp[i];
7324                 tqp = container_of(queue, struct hclge_tqp, q);
7325                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7326         }
7327 }
7328
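/* Wait for an in-flight link update in the service task to finish: spin
 * while HCLGE_STATE_LINK_UPDATING is set, bailing out after
 * HCLGE_FLUSH_LINK_TIMEOUT iterations or once the service task has made
 * progress (serv_processed_cnt changed).
 */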
7329 static void hclge_flush_link_update(struct hclge_dev *hdev)
7330 {
7331 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7332
7333         unsigned long last = hdev->serv_processed_cnt;
7334         int i = 0;
7335
7336         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7337                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7338                last == hdev->serv_processed_cnt)
7339                 usleep_range(1, 1);
7340 }
7341
7342 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7343 {
7344         struct hclge_vport *vport = hclge_get_vport(handle);
7345         struct hclge_dev *hdev = vport->back;
7346
7347         if (enable) {
7348                 hclge_task_schedule(hdev, 0);
7349         } else {
7350                 /* Set the DOWN flag here to disable link updating */
7351                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7352
7353                 /* flush memory to make sure DOWN is seen by service task */
7354                 smp_mb__before_atomic();
7355                 hclge_flush_link_update(hdev);
7356         }
7357 }
7358
7359 static int hclge_ae_start(struct hnae3_handle *handle)
7360 {
7361         struct hclge_vport *vport = hclge_get_vport(handle);
7362         struct hclge_dev *hdev = vport->back;
7363
7364         /* mac enable */
7365         hclge_cfg_mac_mode(hdev, true);
7366         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7367         hdev->hw.mac.link = 0;
7368
7369         /* reset tqp stats */
7370         hclge_reset_tqp_stats(handle);
7371
7372         hclge_mac_start_phy(hdev);
7373
7374         return 0;
7375 }
7376
7377 static void hclge_ae_stop(struct hnae3_handle *handle)
7378 {
7379         struct hclge_vport *vport = hclge_get_vport(handle);
7380         struct hclge_dev *hdev = vport->back;
7381         int i;
7382
7383         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7384         spin_lock_bh(&hdev->fd_rule_lock);
7385         hclge_clear_arfs_rules(handle);
7386         spin_unlock_bh(&hdev->fd_rule_lock);
7387
7388         /* If it is not PF reset, the firmware will disable the MAC,
7389          * so we only need to stop the phy here.
7390          */
7391         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7392             hdev->reset_type != HNAE3_FUNC_RESET) {
7393                 hclge_mac_stop_phy(hdev);
7394                 hclge_update_link_status(hdev);
7395                 return;
7396         }
7397
7398         for (i = 0; i < handle->kinfo.num_tqps; i++)
7399                 hclge_reset_tqp(handle, i);
7400
7401         hclge_config_mac_tnl_int(hdev, false);
7402
7403         /* Mac disable */
7404         hclge_cfg_mac_mode(hdev, false);
7405
7406         hclge_mac_stop_phy(hdev);
7407
7408         /* reset tqp stats */
7409         hclge_reset_tqp_stats(handle);
7410         hclge_update_link_status(hdev);
7411 }
7412
7413 int hclge_vport_start(struct hclge_vport *vport)
7414 {
7415         struct hclge_dev *hdev = vport->back;
7416
7417         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7418         vport->last_active_jiffies = jiffies;
7419
7420         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7421                 if (vport->vport_id) {
7422                         hclge_restore_mac_table_common(vport);
7423                         hclge_restore_vport_vlan_table(vport);
7424                 } else {
7425                         hclge_restore_hw_table(hdev);
7426                 }
7427         }
7428
7429         clear_bit(vport->vport_id, hdev->vport_config_block);
7430
7431         return 0;
7432 }
7433
7434 void hclge_vport_stop(struct hclge_vport *vport)
7435 {
7436         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7437 }
7438
7439 static int hclge_client_start(struct hnae3_handle *handle)
7440 {
7441         struct hclge_vport *vport = hclge_get_vport(handle);
7442
7443         return hclge_vport_start(vport);
7444 }
7445
7446 static void hclge_client_stop(struct hnae3_handle *handle)
7447 {
7448         struct hclge_vport *vport = hclge_get_vport(handle);
7449
7450         hclge_vport_stop(vport);
7451 }
7452
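/* Translate a mac vlan table command completion into an errno: for ADD,
 * an overflow response becomes -ENOSPC; for REMOVE/LOOKUP, an entry miss
 * becomes -ENOENT; any other non-zero response code is reported as -EIO.
 */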
7453 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7454                                          u16 cmdq_resp, u8  resp_code,
7455                                          enum hclge_mac_vlan_tbl_opcode op)
7456 {
7457         struct hclge_dev *hdev = vport->back;
7458
7459         if (cmdq_resp) {
7460                 dev_err(&hdev->pdev->dev,
7461                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7462                         cmdq_resp);
7463                 return -EIO;
7464         }
7465
7466         if (op == HCLGE_MAC_VLAN_ADD) {
7467                 if (!resp_code || resp_code == 1)
7468                         return 0;
7469                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7470                          resp_code == HCLGE_ADD_MC_OVERFLOW)
7471                         return -ENOSPC;
7472
7473                 dev_err(&hdev->pdev->dev,
7474                         "add mac addr failed for undefined, code=%u.\n",
7475                         resp_code);
7476                 return -EIO;
7477         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7478                 if (!resp_code) {
7479                         return 0;
7480                 } else if (resp_code == 1) {
7481                         dev_dbg(&hdev->pdev->dev,
7482                                 "remove mac addr failed for miss.\n");
7483                         return -ENOENT;
7484                 }
7485
7486                 dev_err(&hdev->pdev->dev,
7487                         "remove mac addr failed for undefined, code=%u.\n",
7488                         resp_code);
7489                 return -EIO;
7490         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7491                 if (!resp_code) {
7492                         return 0;
7493                 } else if (resp_code == 1) {
7494                         dev_dbg(&hdev->pdev->dev,
7495                                 "lookup mac addr failed for miss.\n");
7496                         return -ENOENT;
7497                 }
7498
7499                 dev_err(&hdev->pdev->dev,
7500                         "lookup mac addr failed for undefined, code=%u.\n",
7501                         resp_code);
7502                 return -EIO;
7503         }
7504
7505         dev_err(&hdev->pdev->dev,
7506                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7507
7508         return -EINVAL;
7509 }
7510
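/* Set or clear the bit for @vfid in the function bitmap of a mac vlan table
 * entry. The bitmap spans two descriptors: function ids below
 * HCLGE_VF_NUM_IN_FIRST_DESC live in desc[1], the rest in desc[2].
 */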
7511 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7512 {
7513 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7514
7515         unsigned int word_num;
7516         unsigned int bit_num;
7517
7518         if (vfid > 255 || vfid < 0)
7519                 return -EIO;
7520
7521         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7522                 word_num = vfid / 32;
7523                 bit_num  = vfid % 32;
7524                 if (clr)
7525                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7526                 else
7527                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7528         } else {
7529                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7530                 bit_num  = vfid % 32;
7531                 if (clr)
7532                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7533                 else
7534                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7535         }
7536
7537         return 0;
7538 }
7539
7540 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7541 {
7542 #define HCLGE_DESC_NUMBER 3
7543 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7544         int i, j;
7545
7546         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7547                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7548                         if (desc[i].data[j])
7549                                 return false;
7550
7551         return true;
7552 }
7553
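/* Fill a mac vlan table entry from a mac address: bytes 0-3 are packed into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16; the multicast entry bits
 * are set when @is_mc is true.
 */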
7554 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7555                                    const u8 *addr, bool is_mc)
7556 {
7557         const unsigned char *mac_addr = addr;
7558         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7559                        (mac_addr[0]) | (mac_addr[1] << 8);
7560         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7561
7562         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7563         if (is_mc) {
7564                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7565                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7566         }
7567
7568         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7569         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7570 }
7571
7572 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7573                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7574 {
7575         struct hclge_dev *hdev = vport->back;
7576         struct hclge_desc desc;
7577         u8 resp_code;
7578         u16 retval;
7579         int ret;
7580
7581         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7582
7583         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7584
7585         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7586         if (ret) {
7587                 dev_err(&hdev->pdev->dev,
7588                         "del mac addr failed for cmd_send, ret =%d.\n",
7589                         ret);
7590                 return ret;
7591         }
7592         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7593         retval = le16_to_cpu(desc.retval);
7594
7595         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7596                                              HCLGE_MAC_VLAN_REMOVE);
7597 }
7598
7599 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7600                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7601                                      struct hclge_desc *desc,
7602                                      bool is_mc)
7603 {
7604         struct hclge_dev *hdev = vport->back;
7605         u8 resp_code;
7606         u16 retval;
7607         int ret;
7608
7609         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7610         if (is_mc) {
7611                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7612                 memcpy(desc[0].data,
7613                        req,
7614                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7615                 hclge_cmd_setup_basic_desc(&desc[1],
7616                                            HCLGE_OPC_MAC_VLAN_ADD,
7617                                            true);
7618                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7619                 hclge_cmd_setup_basic_desc(&desc[2],
7620                                            HCLGE_OPC_MAC_VLAN_ADD,
7621                                            true);
7622                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7623         } else {
7624                 memcpy(desc[0].data,
7625                        req,
7626                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7627                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7628         }
7629         if (ret) {
7630                 dev_err(&hdev->pdev->dev,
7631                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7632                         ret);
7633                 return ret;
7634         }
7635         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7636         retval = le16_to_cpu(desc[0].retval);
7637
7638         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7639                                              HCLGE_MAC_VLAN_LKUP);
7640 }
7641
7642 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7643                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7644                                   struct hclge_desc *mc_desc)
7645 {
7646         struct hclge_dev *hdev = vport->back;
7647         int cfg_status;
7648         u8 resp_code;
7649         u16 retval;
7650         int ret;
7651
7652         if (!mc_desc) {
7653                 struct hclge_desc desc;
7654
7655                 hclge_cmd_setup_basic_desc(&desc,
7656                                            HCLGE_OPC_MAC_VLAN_ADD,
7657                                            false);
7658                 memcpy(desc.data, req,
7659                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7660                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7661                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7662                 retval = le16_to_cpu(desc.retval);
7663
7664                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7665                                                            resp_code,
7666                                                            HCLGE_MAC_VLAN_ADD);
7667         } else {
7668                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7669                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7670                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7671                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7672                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7673                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7674                 memcpy(mc_desc[0].data, req,
7675                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7676                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7677                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7678                 retval = le16_to_cpu(mc_desc[0].retval);
7679
7680                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7681                                                            resp_code,
7682                                                            HCLGE_MAC_VLAN_ADD);
7683         }
7684
7685         if (ret) {
7686                 dev_err(&hdev->pdev->dev,
7687                         "add mac addr failed for cmd_send, ret =%d.\n",
7688                         ret);
7689                 return ret;
7690         }
7691
7692         return cfg_status;
7693 }
7694
7695 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7696                                u16 *allocated_size)
7697 {
7698         struct hclge_umv_spc_alc_cmd *req;
7699         struct hclge_desc desc;
7700         int ret;
7701
7702         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7703         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7704
7705         req->space_size = cpu_to_le32(space_size);
7706
7707         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7708         if (ret) {
7709                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7710                         ret);
7711                 return ret;
7712         }
7713
7714         *allocated_size = le32_to_cpu(desc.data[1]);
7715
7716         return 0;
7717 }
7718
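/* Request the wanted unicast mac vlan (UMV) table space from firmware and
 * split what was actually allocated: each vport (PF included) gets a private
 * quota of max_umv_size / (num_alloc_vport + 1) entries; one extra quota plus
 * the remainder forms the shared pool.
 */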
7719 static int hclge_init_umv_space(struct hclge_dev *hdev)
7720 {
7721         u16 allocated_size = 0;
7722         int ret;
7723
7724         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7725         if (ret)
7726                 return ret;
7727
7728         if (allocated_size < hdev->wanted_umv_size)
7729                 dev_warn(&hdev->pdev->dev,
7730                          "failed to alloc umv space, want %u, get %u\n",
7731                          hdev->wanted_umv_size, allocated_size);
7732
7733         hdev->max_umv_size = allocated_size;
7734         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7735         hdev->share_umv_size = hdev->priv_umv_size +
7736                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7737
7738         return 0;
7739 }
7740
7741 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7742 {
7743         struct hclge_vport *vport;
7744         int i;
7745
7746         for (i = 0; i < hdev->num_alloc_vport; i++) {
7747                 vport = &hdev->vport[i];
7748                 vport->used_umv_num = 0;
7749         }
7750
7751         mutex_lock(&hdev->vport_lock);
7752         hdev->share_umv_size = hdev->priv_umv_size +
7753                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7754         mutex_unlock(&hdev->vport_lock);
7755 }
7756
7757 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7758 {
7759         struct hclge_dev *hdev = vport->back;
7760         bool is_full;
7761
7762         if (need_lock)
7763                 mutex_lock(&hdev->vport_lock);
7764
7765         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7766                    hdev->share_umv_size == 0);
7767
7768         if (need_lock)
7769                 mutex_unlock(&hdev->vport_lock);
7770
7771         return is_full;
7772 }
7773
7774 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7775 {
7776         struct hclge_dev *hdev = vport->back;
7777
7778         if (is_free) {
7779                 if (vport->used_umv_num > hdev->priv_umv_size)
7780                         hdev->share_umv_size++;
7781
7782                 if (vport->used_umv_num > 0)
7783                         vport->used_umv_num--;
7784         } else {
7785                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7786                     hdev->share_umv_size > 0)
7787                         hdev->share_umv_size--;
7788                 vport->used_umv_num++;
7789         }
7790 }
7791
7792 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7793                                                   const u8 *mac_addr)
7794 {
7795         struct hclge_mac_node *mac_node, *tmp;
7796
7797         list_for_each_entry_safe(mac_node, tmp, list, node)
7798                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7799                         return mac_node;
7800
7801         return NULL;
7802 }
7803
7804 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7805                                   enum HCLGE_MAC_NODE_STATE state)
7806 {
7807         switch (state) {
7808         /* from set_rx_mode or tmp_add_list */
7809         case HCLGE_MAC_TO_ADD:
7810                 if (mac_node->state == HCLGE_MAC_TO_DEL)
7811                         mac_node->state = HCLGE_MAC_ACTIVE;
7812                 break;
7813         /* only from set_rx_mode */
7814         case HCLGE_MAC_TO_DEL:
7815                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7816                         list_del(&mac_node->node);
7817                         kfree(mac_node);
7818                 } else {
7819                         mac_node->state = HCLGE_MAC_TO_DEL;
7820                 }
7821                 break;
7822         /* only from tmp_add_list, the mac_node->state won't be
7823          * ACTIVE.
7824          */
7825         case HCLGE_MAC_ACTIVE:
7826                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7827                         mac_node->state = HCLGE_MAC_ACTIVE;
7828
7829                 break;
7830         }
7831 }
7832
7833 int hclge_update_mac_list(struct hclge_vport *vport,
7834                           enum HCLGE_MAC_NODE_STATE state,
7835                           enum HCLGE_MAC_ADDR_TYPE mac_type,
7836                           const unsigned char *addr)
7837 {
7838         struct hclge_dev *hdev = vport->back;
7839         struct hclge_mac_node *mac_node;
7840         struct list_head *list;
7841
7842         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7843                 &vport->uc_mac_list : &vport->mc_mac_list;
7844
7845         spin_lock_bh(&vport->mac_list_lock);
7846
7847         /* if the mac addr is already in the mac list, no need to add a new
7848          * one into it, just check the mac addr state, convert it to a
7849          * new state, or just remove it, or do nothing.
7850          */
7851         mac_node = hclge_find_mac_node(list, addr);
7852         if (mac_node) {
7853                 hclge_update_mac_node(mac_node, state);
7854                 spin_unlock_bh(&vport->mac_list_lock);
7855                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7856                 return 0;
7857         }
7858
7859         /* if this address was never added, there is no need to delete it */
7860         if (state == HCLGE_MAC_TO_DEL) {
7861                 spin_unlock_bh(&vport->mac_list_lock);
7862                 dev_err(&hdev->pdev->dev,
7863                         "failed to delete address %pM from mac list\n",
7864                         addr);
7865                 return -ENOENT;
7866         }
7867
7868         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7869         if (!mac_node) {
7870                 spin_unlock_bh(&vport->mac_list_lock);
7871                 return -ENOMEM;
7872         }
7873
7874         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7875
7876         mac_node->state = state;
7877         ether_addr_copy(mac_node->mac_addr, addr);
7878         list_add_tail(&mac_node->node, list);
7879
7880         spin_unlock_bh(&vport->mac_list_lock);
7881
7882         return 0;
7883 }
7884
7885 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7886                              const unsigned char *addr)
7887 {
7888         struct hclge_vport *vport = hclge_get_vport(handle);
7889
7890         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7891                                      addr);
7892 }
7893
7894 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7895                              const unsigned char *addr)
7896 {
7897         struct hclge_dev *hdev = vport->back;
7898         struct hclge_mac_vlan_tbl_entry_cmd req;
7899         struct hclge_desc desc;
7900         u16 egress_port = 0;
7901         int ret;
7902
7903         /* mac addr check */
7904         if (is_zero_ether_addr(addr) ||
7905             is_broadcast_ether_addr(addr) ||
7906             is_multicast_ether_addr(addr)) {
7907                 dev_err(&hdev->pdev->dev,
7908                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7909                          addr, is_zero_ether_addr(addr),
7910                          is_broadcast_ether_addr(addr),
7911                          is_multicast_ether_addr(addr));
7912                 return -EINVAL;
7913         }
7914
7915         memset(&req, 0, sizeof(req));
7916
7917         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7918                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7919
7920         req.egress_port = cpu_to_le16(egress_port);
7921
7922         hclge_prepare_mac_addr(&req, addr, false);
7923
7924         /* Lookup the mac address in the mac_vlan table, and add
7925          * it if the entry does not exist. Duplicate unicast
7926          * entries are not allowed in the mac vlan table.
7927          */
7928         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7929         if (ret == -ENOENT) {
7930                 mutex_lock(&hdev->vport_lock);
7931                 if (!hclge_is_umv_space_full(vport, false)) {
7932                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7933                         if (!ret)
7934                                 hclge_update_umv_space(vport, false);
7935                         mutex_unlock(&hdev->vport_lock);
7936                         return ret;
7937                 }
7938                 mutex_unlock(&hdev->vport_lock);
7939
7940                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7941                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7942                                 hdev->priv_umv_size);
7943
7944                 return -ENOSPC;
7945         }
7946
7947         /* check if we just hit the duplicate */
7948         if (!ret) {
7949                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7950                          vport->vport_id, addr);
7951                 return 0;
7952         }
7953
7954         dev_err(&hdev->pdev->dev,
7955                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7956                 addr);
7957
7958         return ret;
7959 }
7960
7961 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7962                             const unsigned char *addr)
7963 {
7964         struct hclge_vport *vport = hclge_get_vport(handle);
7965
7966         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7967                                      addr);
7968 }
7969
7970 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7971                             const unsigned char *addr)
7972 {
7973         struct hclge_dev *hdev = vport->back;
7974         struct hclge_mac_vlan_tbl_entry_cmd req;
7975         int ret;
7976
7977         /* mac addr check */
7978         if (is_zero_ether_addr(addr) ||
7979             is_broadcast_ether_addr(addr) ||
7980             is_multicast_ether_addr(addr)) {
7981                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7982                         addr);
7983                 return -EINVAL;
7984         }
7985
7986         memset(&req, 0, sizeof(req));
7987         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7988         hclge_prepare_mac_addr(&req, addr, false);
7989         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7990         if (!ret) {
7991                 mutex_lock(&hdev->vport_lock);
7992                 hclge_update_umv_space(vport, true);
7993                 mutex_unlock(&hdev->vport_lock);
7994         } else if (ret == -ENOENT) {
7995                 ret = 0;
7996         }
7997
7998         return ret;
7999 }
8000
8001 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8002                              const unsigned char *addr)
8003 {
8004         struct hclge_vport *vport = hclge_get_vport(handle);
8005
8006         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8007                                      addr);
8008 }
8009
8010 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8011                              const unsigned char *addr)
8012 {
8013         struct hclge_dev *hdev = vport->back;
8014         struct hclge_mac_vlan_tbl_entry_cmd req;
8015         struct hclge_desc desc[3];
8016         int status;
8017
8018         /* mac addr check */
8019         if (!is_multicast_ether_addr(addr)) {
8020                 dev_err(&hdev->pdev->dev,
8021                         "Add mc mac err! invalid mac:%pM.\n",
8022                          addr);
8023                 return -EINVAL;
8024         }
8025         memset(&req, 0, sizeof(req));
8026         hclge_prepare_mac_addr(&req, addr, true);
8027         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8028         if (status) {
8029                 /* This mac addr does not exist, add a new entry for it */
8030                 memset(desc[0].data, 0, sizeof(desc[0].data));
8031                 memset(desc[1].data, 0, sizeof(desc[0].data));
8032                 memset(desc[2].data, 0, sizeof(desc[0].data));
8033         }
8034         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8035         if (status)
8036                 return status;
8037         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8038
8039         /* if the table has already overflowed, do not print each time */
8040         if (status == -ENOSPC &&
8041             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8042                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8043
8044         return status;
8045 }
8046
8047 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8048                             const unsigned char *addr)
8049 {
8050         struct hclge_vport *vport = hclge_get_vport(handle);
8051
8052         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8053                                      addr);
8054 }
8055
8056 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8057                             const unsigned char *addr)
8058 {
8059         struct hclge_dev *hdev = vport->back;
8060         struct hclge_mac_vlan_tbl_entry_cmd req;
8061         enum hclge_cmd_status status;
8062         struct hclge_desc desc[3];
8063
8064         /* mac addr check */
8065         if (!is_multicast_ether_addr(addr)) {
8066                 dev_dbg(&hdev->pdev->dev,
8067                         "Remove mc mac err! invalid mac:%pM.\n",
8068                          addr);
8069                 return -EINVAL;
8070         }
8071
8072         memset(&req, 0, sizeof(req));
8073         hclge_prepare_mac_addr(&req, addr, true);
8074         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8075         if (!status) {
8076                 /* This mac addr exists, remove this handle's VFID from it */
8077                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8078                 if (status)
8079                         return status;
8080
8081                 if (hclge_is_all_function_id_zero(desc))
8082                         /* All the vfids are zero, so delete this entry */
8083                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8084                 else
8085                         /* Not all the vfids are zero, just update the vfid */
8086                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8087
8088         } else if (status == -ENOENT) {
8089                 status = 0;
8090         }
8091
8092         return status;
8093 }
8094
8095 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8096                                       struct list_head *list,
8097                                       int (*sync)(struct hclge_vport *,
8098                                                   const unsigned char *))
8099 {
8100         struct hclge_mac_node *mac_node, *tmp;
8101         int ret;
8102
8103         list_for_each_entry_safe(mac_node, tmp, list, node) {
8104                 ret = sync(vport, mac_node->mac_addr);
8105                 if (!ret) {
8106                         mac_node->state = HCLGE_MAC_ACTIVE;
8107                 } else {
8108                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8109                                 &vport->state);
8110                         break;
8111                 }
8112         }
8113 }
8114
8115 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8116                                         struct list_head *list,
8117                                         int (*unsync)(struct hclge_vport *,
8118                                                       const unsigned char *))
8119 {
8120         struct hclge_mac_node *mac_node, *tmp;
8121         int ret;
8122
8123         list_for_each_entry_safe(mac_node, tmp, list, node) {
8124                 ret = unsync(vport, mac_node->mac_addr);
8125                 if (!ret || ret == -ENOENT) {
8126                         list_del(&mac_node->node);
8127                         kfree(mac_node);
8128                 } else {
8129                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8130                                 &vport->state);
8131                         break;
8132                 }
8133         }
8134 }
8135
8136 static bool hclge_sync_from_add_list(struct list_head *add_list,
8137                                      struct list_head *mac_list)
8138 {
8139         struct hclge_mac_node *mac_node, *tmp, *new_node;
8140         bool all_added = true;
8141
8142         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8143                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8144                         all_added = false;
8145
8146                 /* if the mac address from tmp_add_list is not in the
8147                  * uc/mc_mac_list, a TO_DEL request was received during the
8148                  * time window of adding the mac address into the mac table.
8149                  * If the mac_node state is ACTIVE, change it to TO_DEL so it
8150                  * will be removed next time. Otherwise it must be TO_ADD and
8151                  * this address hasn't been added into the mac table yet,
8152                  * so just remove the mac node.
8153                  */
8154                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8155                 if (new_node) {
8156                         hclge_update_mac_node(new_node, mac_node->state);
8157                         list_del(&mac_node->node);
8158                         kfree(mac_node);
8159                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8160                         mac_node->state = HCLGE_MAC_TO_DEL;
8161                         list_del(&mac_node->node);
8162                         list_add_tail(&mac_node->node, mac_list);
8163                 } else {
8164                         list_del(&mac_node->node);
8165                         kfree(mac_node);
8166                 }
8167         }
8168
8169         return all_added;
8170 }
8171
8172 static void hclge_sync_from_del_list(struct list_head *del_list,
8173                                      struct list_head *mac_list)
8174 {
8175         struct hclge_mac_node *mac_node, *tmp, *new_node;
8176
8177         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8178                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8179                 if (new_node) {
8180                         /* If the mac addr exists in the mac list, a new
8181                          * TO_ADD request was received during the time window
8182                          * of configuring the mac address. Since the mac node
8183                          * state is TO_ADD and the address is still in the
8184                          * hardware (because the delete failed), we just need
8185                          * to change the mac node state to ACTIVE.
8186                          */
8187                         new_node->state = HCLGE_MAC_ACTIVE;
8188                         list_del(&mac_node->node);
8189                         kfree(mac_node);
8190                 } else {
8191                         list_del(&mac_node->node);
8192                         list_add_tail(&mac_node->node, mac_list);
8193                 }
8194         }
8195 }
8196
8197 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8198                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8199                                         bool is_all_added)
8200 {
8201         if (mac_type == HCLGE_MAC_ADDR_UC) {
8202                 if (is_all_added)
8203                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8204                 else
8205                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8206         } else {
8207                 if (is_all_added)
8208                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8209                 else
8210                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8211         }
8212 }
8213
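/* Push pending mac address changes of a vport to hardware: under
 * mac_list_lock, move TO_DEL nodes and copies of TO_ADD nodes to temporary
 * lists, program the hardware outside the lock (deletions first to free
 * table space), then merge the results back into the list and update the
 * overflow promisc flags.
 */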
8214 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8215                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8216 {
8217         struct hclge_mac_node *mac_node, *tmp, *new_node;
8218         struct list_head tmp_add_list, tmp_del_list;
8219         struct list_head *list;
8220         bool all_added;
8221
8222         INIT_LIST_HEAD(&tmp_add_list);
8223         INIT_LIST_HEAD(&tmp_del_list);
8224
8225         /* move the mac addr to the tmp_add_list and tmp_del_list, then
8226          * we can add/delete these mac addr outside the spin lock
8227          */
8228         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8229                 &vport->uc_mac_list : &vport->mc_mac_list;
8230
8231         spin_lock_bh(&vport->mac_list_lock);
8232
8233         list_for_each_entry_safe(mac_node, tmp, list, node) {
8234                 switch (mac_node->state) {
8235                 case HCLGE_MAC_TO_DEL:
8236                         list_del(&mac_node->node);
8237                         list_add_tail(&mac_node->node, &tmp_del_list);
8238                         break;
8239                 case HCLGE_MAC_TO_ADD:
8240                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8241                         if (!new_node)
8242                                 goto stop_traverse;
8243                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8244                         new_node->state = mac_node->state;
8245                         list_add_tail(&new_node->node, &tmp_add_list);
8246                         break;
8247                 default:
8248                         break;
8249                 }
8250         }
8251
8252 stop_traverse:
8253         spin_unlock_bh(&vport->mac_list_lock);
8254
8255         /* delete first, in order to get max mac table space for adding */
8256         if (mac_type == HCLGE_MAC_ADDR_UC) {
8257                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8258                                             hclge_rm_uc_addr_common);
8259                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8260                                           hclge_add_uc_addr_common);
8261         } else {
8262                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8263                                             hclge_rm_mc_addr_common);
8264                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8265                                           hclge_add_mc_addr_common);
8266         }
8267
8268         /* if adding/deleting some mac addresses failed, move them back to
8269          * the mac_list and retry next time.
8270          */
8271         spin_lock_bh(&vport->mac_list_lock);
8272
8273         hclge_sync_from_del_list(&tmp_del_list, list);
8274         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8275
8276         spin_unlock_bh(&vport->mac_list_lock);
8277
8278         hclge_update_overflow_flags(vport, mac_type, all_added);
8279 }
8280
8281 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8282 {
8283         struct hclge_dev *hdev = vport->back;
8284
8285         if (test_bit(vport->vport_id, hdev->vport_config_block))
8286                 return false;
8287
8288         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8289                 return true;
8290
8291         return false;
8292 }
8293
8294 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8295 {
8296         int i;
8297
8298         for (i = 0; i < hdev->num_alloc_vport; i++) {
8299                 struct hclge_vport *vport = &hdev->vport[i];
8300
8301                 if (!hclge_need_sync_mac_table(vport))
8302                         continue;
8303
8304                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8305                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8306         }
8307 }
8308
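/* Remove all mac addresses of a vport from the hardware table. When
 * @is_del_list is false (e.g. during a VF reset), active addresses stay in
 * the software list marked TO_ADD so they can be restored afterwards;
 * otherwise the entries are dropped from the list as well.
 */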
8309 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8310                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8311 {
8312         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8313         struct hclge_mac_node *mac_cfg, *tmp;
8314         struct hclge_dev *hdev = vport->back;
8315         struct list_head tmp_del_list, *list;
8316         int ret;
8317
8318         if (mac_type == HCLGE_MAC_ADDR_UC) {
8319                 list = &vport->uc_mac_list;
8320                 unsync = hclge_rm_uc_addr_common;
8321         } else {
8322                 list = &vport->mc_mac_list;
8323                 unsync = hclge_rm_mc_addr_common;
8324         }
8325
8326         INIT_LIST_HEAD(&tmp_del_list);
8327
8328         if (!is_del_list)
8329                 set_bit(vport->vport_id, hdev->vport_config_block);
8330
8331         spin_lock_bh(&vport->mac_list_lock);
8332
8333         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8334                 switch (mac_cfg->state) {
8335                 case HCLGE_MAC_TO_DEL:
8336                 case HCLGE_MAC_ACTIVE:
8337                         list_del(&mac_cfg->node);
8338                         list_add_tail(&mac_cfg->node, &tmp_del_list);
8339                         break;
8340                 case HCLGE_MAC_TO_ADD:
8341                         if (is_del_list) {
8342                                 list_del(&mac_cfg->node);
8343                                 kfree(mac_cfg);
8344                         }
8345                         break;
8346                 }
8347         }
8348
8349         spin_unlock_bh(&vport->mac_list_lock);
8350
8351         list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
8352                 ret = unsync(vport, mac_cfg->mac_addr);
8353                 if (!ret || ret == -ENOENT) {
8354                         /* clear all mac addrs from hardware, but keep them
8355                          * in the mac list so they can be restored after the
8356                          * vf reset finishes.
8357                          */
8358                         if (!is_del_list &&
8359                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8360                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8361                         } else {
8362                                 list_del(&mac_cfg->node);
8363                                 kfree(mac_cfg);
8364                         }
8365                 } else if (is_del_list) {
8366                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8367                 }
8368         }
8369
8370         spin_lock_bh(&vport->mac_list_lock);
8371
8372         hclge_sync_from_del_list(&tmp_del_list, list);
8373
8374         spin_unlock_bh(&vport->mac_list_lock);
8375 }
8376
8377 /* remove all mac addresses when uninitializing */
8378 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8379                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8380 {
8381         struct hclge_mac_node *mac_node, *tmp;
8382         struct hclge_dev *hdev = vport->back;
8383         struct list_head tmp_del_list, *list;
8384
8385         INIT_LIST_HEAD(&tmp_del_list);
8386
8387         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8388                 &vport->uc_mac_list : &vport->mc_mac_list;
8389
8390         spin_lock_bh(&vport->mac_list_lock);
8391
8392         list_for_each_entry_safe(mac_node, tmp, list, node) {
8393                 switch (mac_node->state) {
8394                 case HCLGE_MAC_TO_DEL:
8395                 case HCLGE_MAC_ACTIVE:
8396                         list_del(&mac_node->node);
8397                         list_add_tail(&mac_node->node, &tmp_del_list);
8398                         break;
8399                 case HCLGE_MAC_TO_ADD:
8400                         list_del(&mac_node->node);
8401                         kfree(mac_node);
8402                         break;
8403                 }
8404         }
8405
8406         spin_unlock_bh(&vport->mac_list_lock);
8407
8408         if (mac_type == HCLGE_MAC_ADDR_UC)
8409                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8410                                             hclge_rm_uc_addr_common);
8411         else
8412                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8413                                             hclge_rm_mc_addr_common);
8414
8415         if (!list_empty(&tmp_del_list))
8416                 dev_warn(&hdev->pdev->dev,
8417                          "failed to completely uninit %s mac list for vport %u\n",
8418                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8419                          vport->vport_id);
8420
8421         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8422                 list_del(&mac_node->node);
8423                 kfree(mac_node);
8424         }
8425 }
8426
8427 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8428 {
8429         struct hclge_vport *vport;
8430         int i;
8431
8432         for (i = 0; i < hdev->num_alloc_vport; i++) {
8433                 vport = &hdev->vport[i];
8434                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8435                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8436         }
8437 }
8438
8439 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8440                                               u16 cmdq_resp, u8 resp_code)
8441 {
8442 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
8443 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
8444 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
8445 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
8446
8447         int return_status;
8448
8449         if (cmdq_resp) {
8450                 dev_err(&hdev->pdev->dev,
8451                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8452                         cmdq_resp);
8453                 return -EIO;
8454         }
8455
8456         switch (resp_code) {
8457         case HCLGE_ETHERTYPE_SUCCESS_ADD:
8458         case HCLGE_ETHERTYPE_ALREADY_ADD:
8459                 return_status = 0;
8460                 break;
8461         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8462                 dev_err(&hdev->pdev->dev,
8463                         "add mac ethertype failed for manager table overflow.\n");
8464                 return_status = -EIO;
8465                 break;
8466         case HCLGE_ETHERTYPE_KEY_CONFLICT:
8467                 dev_err(&hdev->pdev->dev,
8468                         "add mac ethertype failed for key conflict.\n");
8469                 return_status = -EIO;
8470                 break;
8471         default:
8472                 dev_err(&hdev->pdev->dev,
8473                         "add mac ethertype failed for undefined, code=%u.\n",
8474                         resp_code);
8475                 return_status = -EIO;
8476         }
8477
8478         return return_status;
8479 }
8480
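/* Check whether mac_addr is already in use: either present in the hardware
 * mac_vlan table or already assigned to another VF's vf_info.mac.
 */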
8481 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8482                                      u8 *mac_addr)
8483 {
8484         struct hclge_mac_vlan_tbl_entry_cmd req;
8485         struct hclge_dev *hdev = vport->back;
8486         struct hclge_desc desc;
8487         u16 egress_port = 0;
8488         int i;
8489
8490         if (is_zero_ether_addr(mac_addr))
8491                 return false;
8492
8493         memset(&req, 0, sizeof(req));
8494         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8495                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8496         req.egress_port = cpu_to_le16(egress_port);
8497         hclge_prepare_mac_addr(&req, mac_addr, false);
8498
8499         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8500                 return true;
8501
8502         vf_idx += HCLGE_VF_VPORT_START_NUM;
8503         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8504                 if (i != vf_idx &&
8505                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8506                         return true;
8507
8508         return false;
8509 }
8510
8511 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8512                             u8 *mac_addr)
8513 {
8514         struct hclge_vport *vport = hclge_get_vport(handle);
8515         struct hclge_dev *hdev = vport->back;
8516
8517         vport = hclge_get_vf_vport(hdev, vf);
8518         if (!vport)
8519                 return -EINVAL;
8520
8521         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8522                 dev_info(&hdev->pdev->dev,
8523                          "Specified MAC(=%pM) is the same as before, no change committed!\n",
8524                          mac_addr);
8525                 return 0;
8526         }
8527
8528         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8529                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8530                         mac_addr);
8531                 return -EEXIST;
8532         }
8533
8534         ether_addr_copy(vport->vf_info.mac, mac_addr);
8535
8536         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8537                 dev_info(&hdev->pdev->dev,
8538                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8539                          vf, mac_addr);
8540                 return hclge_inform_reset_assert_to_vf(vport);
8541         }
8542
8543         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8544                  vf, mac_addr);
8545         return 0;
8546 }
8547
8548 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8549                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8550 {
8551         struct hclge_desc desc;
8552         u8 resp_code;
8553         u16 retval;
8554         int ret;
8555
8556         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8557         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8558
8559         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8560         if (ret) {
8561                 dev_err(&hdev->pdev->dev,
8562                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8563                         ret);
8564                 return ret;
8565         }
8566
8567         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8568         retval = le16_to_cpu(desc.retval);
8569
8570         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8571 }
8572
8573 static int init_mgr_tbl(struct hclge_dev *hdev)
8574 {
8575         int ret;
8576         int i;
8577
8578         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8579                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8580                 if (ret) {
8581                         dev_err(&hdev->pdev->dev,
8582                                 "add mac ethertype failed, ret =%d.\n",
8583                                 ret);
8584                         return ret;
8585                 }
8586         }
8587
8588         return 0;
8589 }
8590
8591 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8592 {
8593         struct hclge_vport *vport = hclge_get_vport(handle);
8594         struct hclge_dev *hdev = vport->back;
8595
8596         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8597 }
8598
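/* Update the uc mac list when the device address changes: keep new_addr at
 * the head of the list (so it is not lost to the umv space limit when the
 * table is restored), and mark old_addr for deletion unless it was never
 * written to hardware, in which case its node is simply freed.
 */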
8599 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8600                                        const u8 *old_addr, const u8 *new_addr)
8601 {
8602         struct list_head *list = &vport->uc_mac_list;
8603         struct hclge_mac_node *old_node, *new_node;
8604
8605         new_node = hclge_find_mac_node(list, new_addr);
8606         if (!new_node) {
8607                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8608                 if (!new_node)
8609                         return -ENOMEM;
8610
8611                 new_node->state = HCLGE_MAC_TO_ADD;
8612                 ether_addr_copy(new_node->mac_addr, new_addr);
8613                 list_add(&new_node->node, list);
8614         } else {
8615                 if (new_node->state == HCLGE_MAC_TO_DEL)
8616                         new_node->state = HCLGE_MAC_ACTIVE;
8617
8618                 /* make sure the new addr is at the list head, so the dev
8619                  * addr is not skipped when re-adding to the mac table due to
8620                  * the umv space limitation after a global/imp reset, which
8621                  * clears the mac table in hardware.
8622                  */
8623                 list_move(&new_node->node, list);
8624         }
8625
8626         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8627                 old_node = hclge_find_mac_node(list, old_addr);
8628                 if (old_node) {
8629                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8630                                 list_del(&old_node->node);
8631                                 kfree(old_node);
8632                         } else {
8633                                 old_node->state = HCLGE_MAC_TO_DEL;
8634                         }
8635                 }
8636         }
8637
8638         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8639
8640         return 0;
8641 }
8642
8643 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8644                               bool is_first)
8645 {
8646         const unsigned char *new_addr = (const unsigned char *)p;
8647         struct hclge_vport *vport = hclge_get_vport(handle);
8648         struct hclge_dev *hdev = vport->back;
8649         unsigned char *old_addr = NULL;
8650         int ret;
8651
8652         /* mac addr check */
8653         if (is_zero_ether_addr(new_addr) ||
8654             is_broadcast_ether_addr(new_addr) ||
8655             is_multicast_ether_addr(new_addr)) {
8656                 dev_err(&hdev->pdev->dev,
8657                         "change uc mac err! invalid mac: %pM.\n",
8658                          new_addr);
8659                 return -EINVAL;
8660         }
8661
8662         ret = hclge_pause_addr_cfg(hdev, new_addr);
8663         if (ret) {
8664                 dev_err(&hdev->pdev->dev,
8665                         "failed to configure mac pause address, ret = %d\n",
8666                         ret);
8667                 return ret;
8668         }
8669
8670         if (!is_first)
8671                 old_addr = hdev->hw.mac.mac_addr;
8672
8673         spin_lock_bh(&vport->mac_list_lock);
8674         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8675         if (ret) {
8676                 dev_err(&hdev->pdev->dev,
8677                         "failed to change the mac addr:%pM, ret = %d\n",
8678                         new_addr, ret);
8679                 spin_unlock_bh(&vport->mac_list_lock);
8680
8681                 if (!is_first)
8682                         hclge_pause_addr_cfg(hdev, old_addr);
8683
8684                 return ret;
8685         }
8686         /* we must update the dev addr under the spin lock to prevent it
8687          * from being removed by the set_rx_mode path.
8688          */
8689         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8690         spin_unlock_bh(&vport->mac_list_lock);
8691
8692         hclge_task_schedule(hdev, 0);
8693
8694         return 0;
8695 }
8696
8697 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8698                           int cmd)
8699 {
8700         struct hclge_vport *vport = hclge_get_vport(handle);
8701         struct hclge_dev *hdev = vport->back;
8702
8703         if (!hdev->hw.mac.phydev)
8704                 return -EOPNOTSUPP;
8705
8706         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8707 }
8708
8709 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8710                                       u8 fe_type, bool filter_en, u8 vf_id)
8711 {
8712         struct hclge_vlan_filter_ctrl_cmd *req;
8713         struct hclge_desc desc;
8714         int ret;
8715
8716         /* read current vlan filter parameter */
8717         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8718         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8719         req->vlan_type = vlan_type;
8720         req->vf_id = vf_id;
8721
8722         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8723         if (ret) {
8724                 dev_err(&hdev->pdev->dev,
8725                         "failed to get vlan filter config, ret = %d.\n", ret);
8726                 return ret;
8727         }
8728
8729         /* modify and write new config parameter */
8730         hclge_cmd_reuse_desc(&desc, false);
8731         req->vlan_fe = filter_en ?
8732                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8733
8734         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8735         if (ret)
8736                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8737                         ret);
8738
8739         return ret;
8740 }
8741
8742 #define HCLGE_FILTER_TYPE_VF            0
8743 #define HCLGE_FILTER_TYPE_PORT          1
8744 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8745 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8746 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8747 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8748 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8749 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8750                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8751 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8752                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8753
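/* On V2 and later devices the VF egress filter and the port ingress filter
 * are toggled separately; V1 devices only expose the single egress enable
 * bit. HNAE3_VLAN_FLTR in netdev_flags mirrors the requested state.
 */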
8754 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8755 {
8756         struct hclge_vport *vport = hclge_get_vport(handle);
8757         struct hclge_dev *hdev = vport->back;
8758
8759         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8760                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8761                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
8762                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8763                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
8764         } else {
8765                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8766                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8767                                            0);
8768         }
8769         if (enable)
8770                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8771         else
8772                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8773 }
8774
8775 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8776                                     bool is_kill, u16 vlan,
8777                                     __be16 proto)
8778 {
8779         struct hclge_vport *vport = &hdev->vport[vfid];
8780         struct hclge_vlan_filter_vf_cfg_cmd *req0;
8781         struct hclge_vlan_filter_vf_cfg_cmd *req1;
8782         struct hclge_desc desc[2];
8783         u8 vf_byte_val;
8784         u8 vf_byte_off;
8785         int ret;
8786
8787         /* if the vf vlan table is full, firmware closes the vf vlan filter,
8788          * so it is impossible and unnecessary to add a new vlan id to it.
8789          * If spoof check is also enabled, fail the add, since tx packets
8790          * with that vlan id would be dropped.
8791          */
8792         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8793                 if (vport->vf_info.spoofchk && vlan) {
8794                         dev_err(&hdev->pdev->dev,
8795                                 "Can't add vlan because spoof check is on and vf vlan table is full\n");
8796                         return -EPERM;
8797                 }
8798                 return 0;
8799         }
8800
8801         hclge_cmd_setup_basic_desc(&desc[0],
8802                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8803         hclge_cmd_setup_basic_desc(&desc[1],
8804                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8805
8806         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8807
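        /* the vf bitmap spans two descriptors: byte vfid / 8 selects the
         * slot and bit vfid % 8 selects the vf within it; bytes beyond
         * HCLGE_MAX_VF_BYTES fall into the second descriptor.
         */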
8808         vf_byte_off = vfid / 8;
8809         vf_byte_val = 1 << (vfid % 8);
8810
8811         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8812         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8813
8814         req0->vlan_id  = cpu_to_le16(vlan);
8815         req0->vlan_cfg = is_kill;
8816
8817         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8818                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8819         else
8820                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8821
8822         ret = hclge_cmd_send(&hdev->hw, desc, 2);
8823         if (ret) {
8824                 dev_err(&hdev->pdev->dev,
8825                         "Send vf vlan command fail, ret =%d.\n",
8826                         ret);
8827                 return ret;
8828         }
8829
8830         if (!is_kill) {
8831 #define HCLGE_VF_VLAN_NO_ENTRY  2
8832                 if (!req0->resp_code || req0->resp_code == 1)
8833                         return 0;
8834
8835                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8836                         set_bit(vfid, hdev->vf_vlan_full);
8837                         dev_warn(&hdev->pdev->dev,
8838                                  "vf vlan table is full, vf vlan filter is disabled\n");
8839                         return 0;
8840                 }
8841
8842                 dev_err(&hdev->pdev->dev,
8843                         "Add vf vlan filter fail, ret =%u.\n",
8844                         req0->resp_code);
8845         } else {
8846 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
8847                 if (!req0->resp_code)
8848                         return 0;
8849
8850                 /* when the vf vlan table is full, the vf vlan filter is
8851                  * disabled and new vlan ids are not added into the table.
8852                  * Just return 0 without a warning to avoid massive verbose
8853                  * logs on unload.
8854                  */
8855                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8856                         return 0;
8857
8858                 dev_err(&hdev->pdev->dev,
8859                         "Kill vf vlan filter fail, ret =%u.\n",
8860                         req0->resp_code);
8861         }
8862
8863         return -EIO;
8864 }
8865
8866 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8867                                       u16 vlan_id, bool is_kill)
8868 {
8869         struct hclge_vlan_filter_pf_cfg_cmd *req;
8870         struct hclge_desc desc;
8871         u8 vlan_offset_byte_val;
8872         u8 vlan_offset_byte;
8873         u8 vlan_offset_160;
8874         int ret;
8875
8876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8877
8878         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8879         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8880                            HCLGE_VLAN_BYTE_SIZE;
8881         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8882
8883         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8884         req->vlan_offset = vlan_offset_160;
8885         req->vlan_cfg = is_kill;
8886         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8887
8888         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8889         if (ret)
8890                 dev_err(&hdev->pdev->dev,
8891                         "port vlan command, send fail, ret =%d.\n", ret);
8892         return ret;
8893 }
8894
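/* Program a vlan filter entry for one vport. The port-level entry is
 * shared: it is written only when the first vport joins the vlan and
 * cleared only when the last vport leaves it, as tracked by
 * hdev->vlan_table[vlan_id].
 */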
8895 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8896                                     u16 vport_id, u16 vlan_id,
8897                                     bool is_kill)
8898 {
8899         u16 vport_idx, vport_num = 0;
8900         int ret;
8901
8902         if (is_kill && !vlan_id)
8903                 return 0;
8904
8905         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8906                                        proto);
8907         if (ret) {
8908                 dev_err(&hdev->pdev->dev,
8909                         "Set %u vport vlan filter config fail, ret =%d.\n",
8910                         vport_id, ret);
8911                 return ret;
8912         }
8913
8914         /* vlan 0 may be added twice when 8021q module is enabled */
8915         if (!is_kill && !vlan_id &&
8916             test_bit(vport_id, hdev->vlan_table[vlan_id]))
8917                 return 0;
8918
8919         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8920                 dev_err(&hdev->pdev->dev,
8921                         "Add port vlan failed, vport %u is already in vlan %u\n",
8922                         vport_id, vlan_id);
8923                 return -EINVAL;
8924         }
8925
8926         if (is_kill &&
8927             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8928                 dev_err(&hdev->pdev->dev,
8929                         "Delete port vlan failed, vport %u is not in vlan %u\n",
8930                         vport_id, vlan_id);
8931                 return -EINVAL;
8932         }
8933
8934         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8935                 vport_num++;
8936
8937         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8938                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8939                                                  is_kill);
8940
8941         return ret;
8942 }
8943
8944 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8945 {
8946         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8947         struct hclge_vport_vtag_tx_cfg_cmd *req;
8948         struct hclge_dev *hdev = vport->back;
8949         struct hclge_desc desc;
8950         u16 bmap_index;
8951         int status;
8952
8953         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8954
8955         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8956         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8957         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8958         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8959                       vcfg->accept_tag1 ? 1 : 0);
8960         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8961                       vcfg->accept_untag1 ? 1 : 0);
8962         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8963                       vcfg->accept_tag2 ? 1 : 0);
8964         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8965                       vcfg->accept_untag2 ? 1 : 0);
8966         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8967                       vcfg->insert_tag1_en ? 1 : 0);
8968         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8969                       vcfg->insert_tag2_en ? 1 : 0);
8970         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
8971                       vcfg->tag_shift_mode_en ? 1 : 0);
8972         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8973
8974         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8975         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8976                         HCLGE_VF_NUM_PER_BYTE;
8977         req->vf_bitmap[bmap_index] =
8978                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8979
8980         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8981         if (status)
8982                 dev_err(&hdev->pdev->dev,
8983                         "Send port txvlan cfg command fail, ret =%d\n",
8984                         status);
8985
8986         return status;
8987 }
8988
8989 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8990 {
8991         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8992         struct hclge_vport_vtag_rx_cfg_cmd *req;
8993         struct hclge_dev *hdev = vport->back;
8994         struct hclge_desc desc;
8995         u16 bmap_index;
8996         int status;
8997
8998         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8999
9000         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9001         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9002                       vcfg->strip_tag1_en ? 1 : 0);
9003         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9004                       vcfg->strip_tag2_en ? 1 : 0);
9005         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9006                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9007         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9008                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9009         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9010                       vcfg->strip_tag1_discard_en ? 1 : 0);
9011         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9012                       vcfg->strip_tag2_discard_en ? 1 : 0);
9013
9014         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9015         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9016                         HCLGE_VF_NUM_PER_BYTE;
9017         req->vf_bitmap[bmap_index] =
9018                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9019
9020         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9021         if (status)
9022                 dev_err(&hdev->pdev->dev,
9023                         "Send port rxvlan cfg command fail, ret =%d\n",
9024                         status);
9025
9026         return status;
9027 }
9028
9029 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9030                                   u16 port_base_vlan_state,
9031                                   u16 vlan_tag)
9032 {
9033         int ret;
9034
9035         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9036                 vport->txvlan_cfg.accept_tag1 = true;
9037                 vport->txvlan_cfg.insert_tag1_en = false;
9038                 vport->txvlan_cfg.default_tag1 = 0;
9039         } else {
9040                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9041
9042                 vport->txvlan_cfg.accept_tag1 =
9043                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9044                 vport->txvlan_cfg.insert_tag1_en = true;
9045                 vport->txvlan_cfg.default_tag1 = vlan_tag;
9046         }
9047
9048         vport->txvlan_cfg.accept_untag1 = true;
9049
9050         /* accept_tag2 and accept_untag2 are not supported on
9051          * pdev revision(0x20); newer revisions support them, but
9052          * these two fields cannot be configured by the user.
9053          */
9054         vport->txvlan_cfg.accept_tag2 = true;
9055         vport->txvlan_cfg.accept_untag2 = true;
9056         vport->txvlan_cfg.insert_tag2_en = false;
9057         vport->txvlan_cfg.default_tag2 = 0;
9058         vport->txvlan_cfg.tag_shift_mode_en = true;
9059
9060         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9061                 vport->rxvlan_cfg.strip_tag1_en = false;
9062                 vport->rxvlan_cfg.strip_tag2_en =
9063                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9064                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9065         } else {
9066                 vport->rxvlan_cfg.strip_tag1_en =
9067                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9068                 vport->rxvlan_cfg.strip_tag2_en = true;
9069                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9070         }
9071
9072         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9073         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9074         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9075
9076         ret = hclge_set_vlan_tx_offload_cfg(vport);
9077         if (ret)
9078                 return ret;
9079
9080         return hclge_set_vlan_rx_offload_cfg(vport);
9081 }
9082
9083 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9084 {
9085         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9086         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9087         struct hclge_desc desc;
9088         int status;
9089
9090         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9091         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9092         rx_req->ot_fst_vlan_type =
9093                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9094         rx_req->ot_sec_vlan_type =
9095                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9096         rx_req->in_fst_vlan_type =
9097                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9098         rx_req->in_sec_vlan_type =
9099                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9100
9101         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9102         if (status) {
9103                 dev_err(&hdev->pdev->dev,
9104                         "Send rxvlan protocol type command fail, ret =%d\n",
9105                         status);
9106                 return status;
9107         }
9108
9109         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9110
9111         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9112         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9113         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9114
9115         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9116         if (status)
9117                 dev_err(&hdev->pdev->dev,
9118                         "Send txvlan protocol type command fail, ret =%d\n",
9119                         status);
9120
9121         return status;
9122 }
9123
9124 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9125 {
9126 #define HCLGE_DEF_VLAN_TYPE             0x8100
9127
9128         struct hnae3_handle *handle = &hdev->vport[0].nic;
9129         struct hclge_vport *vport;
9130         int ret;
9131         int i;
9132
9133         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9134                 /* for revision 0x21, vf vlan filter is per function */
9135                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9136                         vport = &hdev->vport[i];
9137                         ret = hclge_set_vlan_filter_ctrl(hdev,
9138                                                          HCLGE_FILTER_TYPE_VF,
9139                                                          HCLGE_FILTER_FE_EGRESS,
9140                                                          true,
9141                                                          vport->vport_id);
9142                         if (ret)
9143                                 return ret;
9144                 }
9145
9146                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9147                                                  HCLGE_FILTER_FE_INGRESS, true,
9148                                                  0);
9149                 if (ret)
9150                         return ret;
9151         } else {
9152                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9153                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9154                                                  true, 0);
9155                 if (ret)
9156                         return ret;
9157         }
9158
9159         handle->netdev_flags |= HNAE3_VLAN_FLTR;
9160
9161         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9162         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9163         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9164         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9165         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9166         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9167
9168         ret = hclge_set_vlan_protocol_type(hdev);
9169         if (ret)
9170                 return ret;
9171
9172         for (i = 0; i < hdev->num_alloc_vport; i++) {
9173                 u16 vlan_tag;
9174
9175                 vport = &hdev->vport[i];
9176                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9177
9178                 ret = hclge_vlan_offload_cfg(vport,
9179                                              vport->port_base_vlan_cfg.state,
9180                                              vlan_tag);
9181                 if (ret)
9182                         return ret;
9183         }
9184
9185         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9186 }
9187
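/* Track a vlan id in the vport's software vlan list. hd_tbl_status records
 * whether the id has actually been written to the hardware vlan filter,
 * which may be deferred while a port based vlan is in effect.
 */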
9188 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9189                                        bool writen_to_tbl)
9190 {
9191         struct hclge_vport_vlan_cfg *vlan;
9192
9193         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9194         if (!vlan)
9195                 return;
9196
9197         vlan->hd_tbl_status = writen_to_tbl;
9198         vlan->vlan_id = vlan_id;
9199
9200         list_add_tail(&vlan->node, &vport->vlan_list);
9201 }
9202
9203 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9204 {
9205         struct hclge_vport_vlan_cfg *vlan, *tmp;
9206         struct hclge_dev *hdev = vport->back;
9207         int ret;
9208
9209         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9210                 if (!vlan->hd_tbl_status) {
9211                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9212                                                        vport->vport_id,
9213                                                        vlan->vlan_id, false);
9214                         if (ret) {
9215                                 dev_err(&hdev->pdev->dev,
9216                                         "restore vport vlan list failed, ret=%d\n",
9217                                         ret);
9218                                 return ret;
9219                         }
9220                 }
9221                 vlan->hd_tbl_status = true;
9222         }
9223
9224         return 0;
9225 }
9226
9227 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9228                                       bool is_write_tbl)
9229 {
9230         struct hclge_vport_vlan_cfg *vlan, *tmp;
9231         struct hclge_dev *hdev = vport->back;
9232
9233         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9234                 if (vlan->vlan_id == vlan_id) {
9235                         if (is_write_tbl && vlan->hd_tbl_status)
9236                                 hclge_set_vlan_filter_hw(hdev,
9237                                                          htons(ETH_P_8021Q),
9238                                                          vport->vport_id,
9239                                                          vlan_id,
9240                                                          true);
9241
9242                         list_del(&vlan->node);
9243                         kfree(vlan);
9244                         break;
9245                 }
9246         }
9247 }
9248
9249 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9250 {
9251         struct hclge_vport_vlan_cfg *vlan, *tmp;
9252         struct hclge_dev *hdev = vport->back;
9253
9254         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9255                 if (vlan->hd_tbl_status)
9256                         hclge_set_vlan_filter_hw(hdev,
9257                                                  htons(ETH_P_8021Q),
9258                                                  vport->vport_id,
9259                                                  vlan->vlan_id,
9260                                                  true);
9261
9262                 vlan->hd_tbl_status = false;
9263                 if (is_del_list) {
9264                         list_del(&vlan->node);
9265                         kfree(vlan);
9266                 }
9267         }
9268         clear_bit(vport->vport_id, hdev->vf_vlan_full);
9269 }
9270
9271 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9272 {
9273         struct hclge_vport_vlan_cfg *vlan, *tmp;
9274         struct hclge_vport *vport;
9275         int i;
9276
9277         for (i = 0; i < hdev->num_alloc_vport; i++) {
9278                 vport = &hdev->vport[i];
9279                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9280                         list_del(&vlan->node);
9281                         kfree(vlan);
9282                 }
9283         }
9284 }
9285
9286 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9287 {
9288         struct hclge_vport_vlan_cfg *vlan, *tmp;
9289         struct hclge_dev *hdev = vport->back;
9290         u16 vlan_proto;
9291         u16 vlan_id;
9292         u16 state;
9293         int ret;
9294
9295         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9296         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9297         state = vport->port_base_vlan_cfg.state;
9298
9299         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9300                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9301                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9302                                          vport->vport_id, vlan_id,
9303                                          false);
9304                 return;
9305         }
9306
9307         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9308                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9309                                                vport->vport_id,
9310                                                vlan->vlan_id, false);
9311                 if (ret)
9312                         break;
9313                 vlan->hd_tbl_status = true;
9314         }
9315 }
9316
9317 /* For global reset and imp reset, hardware will clear the mac table,
9318  * so we change the mac address state from ACTIVE to TO_ADD, then they
9319  * can be restored in the service task after the reset completes.
9320  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
9321  * to be restored after reset, so just remove those nodes from mac_list.
9322  */
9323 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9324 {
9325         struct hclge_mac_node *mac_node, *tmp;
9326
9327         list_for_each_entry_safe(mac_node, tmp, list, node) {
9328                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9329                         mac_node->state = HCLGE_MAC_TO_ADD;
9330                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9331                         list_del(&mac_node->node);
9332                         kfree(mac_node);
9333                 }
9334         }
9335 }
9336
9337 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9338 {
9339         spin_lock_bh(&vport->mac_list_lock);
9340
9341         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9342         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9343         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9344
9345         spin_unlock_bh(&vport->mac_list_lock);
9346 }
9347
9348 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9349 {
9350         struct hclge_vport *vport = &hdev->vport[0];
9351         struct hnae3_handle *handle = &vport->nic;
9352
9353         hclge_restore_mac_table_common(vport);
9354         hclge_restore_vport_vlan_table(vport);
9355         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9356
9357         hclge_restore_fd_entries(handle);
9358 }
9359
9360 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9361 {
9362         struct hclge_vport *vport = hclge_get_vport(handle);
9363
9364         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9365                 vport->rxvlan_cfg.strip_tag1_en = false;
9366                 vport->rxvlan_cfg.strip_tag2_en = enable;
9367                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9368         } else {
9369                 vport->rxvlan_cfg.strip_tag1_en = enable;
9370                 vport->rxvlan_cfg.strip_tag2_en = true;
9371                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9372         }
9373
9374         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9375         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9376         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9377         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9378
9379         return hclge_set_vlan_rx_offload_cfg(vport);
9380 }
9381
9382 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9383                                             u16 port_base_vlan_state,
9384                                             struct hclge_vlan_info *new_info,
9385                                             struct hclge_vlan_info *old_info)
9386 {
9387         struct hclge_dev *hdev = vport->back;
9388         int ret;
9389
9390         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9391                 hclge_rm_vport_all_vlan_table(vport, false);
9392                 return hclge_set_vlan_filter_hw(hdev,
9393                                                  htons(new_info->vlan_proto),
9394                                                  vport->vport_id,
9395                                                  new_info->vlan_tag,
9396                                                  false);
9397         }
9398
9399         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9400                                        vport->vport_id, old_info->vlan_tag,
9401                                        true);
9402         if (ret)
9403                 return ret;
9404
9405         return hclge_add_vport_all_vlan_table(vport);
9406 }
9407
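/* Apply a new port based vlan configuration. For a MODIFY transition only
 * the hardware filter entry is swapped (add the new tag, remove the old
 * one) and the state is left unchanged; for enable/disable transitions the
 * vlan filter entries are rebuilt and the port_base_vlan state is updated.
 */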
9408 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9409                                     struct hclge_vlan_info *vlan_info)
9410 {
9411         struct hnae3_handle *nic = &vport->nic;
9412         struct hclge_vlan_info *old_vlan_info;
9413         struct hclge_dev *hdev = vport->back;
9414         int ret;
9415
9416         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9417
9418         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9419         if (ret)
9420                 return ret;
9421
9422         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9423                 /* add new VLAN tag */
9424                 ret = hclge_set_vlan_filter_hw(hdev,
9425                                                htons(vlan_info->vlan_proto),
9426                                                vport->vport_id,
9427                                                vlan_info->vlan_tag,
9428                                                false);
9429                 if (ret)
9430                         return ret;
9431
9432                 /* remove old VLAN tag */
9433                 ret = hclge_set_vlan_filter_hw(hdev,
9434                                                htons(old_vlan_info->vlan_proto),
9435                                                vport->vport_id,
9436                                                old_vlan_info->vlan_tag,
9437                                                true);
9438                 if (ret)
9439                         return ret;
9440
9441                 goto update;
9442         }
9443
9444         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9445                                                old_vlan_info);
9446         if (ret)
9447                 return ret;
9448
9449         /* update state only when disable/enable port based VLAN */
9450         vport->port_base_vlan_cfg.state = state;
9451         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9452                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9453         else
9454                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9455
9456 update:
9457         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9458         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9459         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9460
9461         return 0;
9462 }
9463
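/* Map a requested VF vlan tag onto a port based vlan state transition:
 * a non-zero tag enables (or modifies) the port based vlan, tag 0 disables
 * it, and repeating the current setting is reported as NOCHANGE.
 */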
9464 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9465                                           enum hnae3_port_base_vlan_state state,
9466                                           u16 vlan)
9467 {
9468         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9469                 if (!vlan)
9470                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9471                 else
9472                         return HNAE3_PORT_BASE_VLAN_ENABLE;
9473         } else {
9474                 if (!vlan)
9475                         return HNAE3_PORT_BASE_VLAN_DISABLE;
9476                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9477                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9478                 else
9479                         return HNAE3_PORT_BASE_VLAN_MODIFY;
9480         }
9481 }
9482
9483 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9484                                     u16 vlan, u8 qos, __be16 proto)
9485 {
9486         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9487         struct hclge_vport *vport = hclge_get_vport(handle);
9488         struct hclge_dev *hdev = vport->back;
9489         struct hclge_vlan_info vlan_info;
9490         u16 state;
9491         int ret;
9492
9493         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9494                 return -EOPNOTSUPP;
9495
9496         vport = hclge_get_vf_vport(hdev, vfid);
9497         if (!vport)
9498                 return -EINVAL;
9499
9500         /* qos is a 3-bit value, so it cannot be bigger than 7 */
9501         if (vlan > VLAN_N_VID - 1 || qos > 7)
9502                 return -EINVAL;
9503         if (proto != htons(ETH_P_8021Q))
9504                 return -EPROTONOSUPPORT;
9505
9506         state = hclge_get_port_base_vlan_state(vport,
9507                                                vport->port_base_vlan_cfg.state,
9508                                                vlan);
9509         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9510                 return 0;
9511
9512         vlan_info.vlan_tag = vlan;
9513         vlan_info.qos = qos;
9514         vlan_info.vlan_proto = ntohs(proto);
9515
9516         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9517         if (ret) {
9518                 dev_err(&hdev->pdev->dev,
9519                         "failed to update port base vlan for vf %d, ret = %d\n",
9520                         vfid, ret);
9521                 return ret;
9522         }
9523
9524         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9525          * VLAN state.
9526          */
9527         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9528             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9529                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9530                                                   vport->vport_id, state,
9531                                                   vlan, qos,
9532                                                   ntohs(proto));
9533
9534         return 0;
9535 }
9536
9537 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9538 {
9539         struct hclge_vlan_info *vlan_info;
9540         struct hclge_vport *vport;
9541         int ret;
9542         int vf;
9543
9544         /* clear port base vlan for all vf */
9545         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9546                 vport = &hdev->vport[vf];
9547                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9548
9549                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9550                                                vport->vport_id,
9551                                                vlan_info->vlan_tag, true);
9552                 if (ret)
9553                         dev_err(&hdev->pdev->dev,
9554                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9555                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9556         }
9557 }
9558
9559 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9560                           u16 vlan_id, bool is_kill)
9561 {
9562         struct hclge_vport *vport = hclge_get_vport(handle);
9563         struct hclge_dev *hdev = vport->back;
9564         bool writen_to_tbl = false;
9565         int ret = 0;
9566
9567         /* When the device is resetting or reset has failed, firmware is
9568          * unable to handle the mailbox. Just record the vlan id, and remove
9569          * it after the reset finishes.
9570          */
9571         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9572              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9573                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9574                 return -EBUSY;
9575         }
9576
9577         /* when port based vlan is enabled, we use the port based vlan as
9578          * the vlan filter entry. In this case, we don't update the vlan
9579          * filter table when the user adds or removes a vlan; we just update
9580          * the vport vlan list. The vlan ids in the vlan list are written to
9581          * the vlan filter table only once port based vlan is disabled.
9582          */
9583         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9584                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9585                                                vlan_id, is_kill);
9586                 writen_to_tbl = true;
9587         }
9588
9589         if (!ret) {
9590                 if (is_kill)
9591                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9592                 else
9593                         hclge_add_vport_vlan_table(vport, vlan_id,
9594                                                    writen_to_tbl);
9595         } else if (is_kill) {
9596                 /* when removing the hw vlan filter failed, record the vlan
9597                  * id and try to remove it from hw later, to stay consistent
9598                  * with the stack
9599                  */
9600                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9601         }
9602         return ret;
9603 }
9604
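/* Retry vlan deletions that previously failed (recorded in each vport's
 * vlan_del_fail_bmap), bounded by HCLGE_MAX_SYNC_COUNT removals per call.
 */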
9605 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9606 {
9607 #define HCLGE_MAX_SYNC_COUNT    60
9608
9609         int i, ret, sync_cnt = 0;
9610         u16 vlan_id;
9611
9612         /* start from vport 1 for PF is always alive */
9613         for (i = 0; i < hdev->num_alloc_vport; i++) {
9614                 struct hclge_vport *vport = &hdev->vport[i];
9615
9616                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9617                                          VLAN_N_VID);
9618                 while (vlan_id != VLAN_N_VID) {
9619                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9620                                                        vport->vport_id, vlan_id,
9621                                                        true);
9622                         if (ret && ret != -EINVAL)
9623                                 return;
9624
9625                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9626                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9627
9628                         sync_cnt++;
9629                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9630                                 return;
9631
9632                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9633                                                  VLAN_N_VID);
9634                 }
9635         }
9636 }
9637
9638 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9639 {
9640         struct hclge_config_max_frm_size_cmd *req;
9641         struct hclge_desc desc;
9642
9643         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9644
9645         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9646         req->max_frm_size = cpu_to_le16(new_mps);
9647         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9648
9649         return hclge_cmd_send(&hdev->hw, &desc, 1);
9650 }
9651
9652 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9653 {
9654         struct hclge_vport *vport = hclge_get_vport(handle);
9655
9656         return hclge_set_vport_mtu(vport, new_mtu);
9657 }
9658
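/* Set the max frame size for a vport. The frame size adds the Ethernet
 * header, FCS and two vlan tags to the MTU. A VF's mps may not exceed the
 * PF's, and the PF's mps may not drop below any VF's; only a PF change
 * reprograms the MAC and reallocates the packet buffers.
 */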
9659 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9660 {
9661         struct hclge_dev *hdev = vport->back;
9662         int i, max_frm_size, ret;
9663
9664         /* HW supports 2 layers of vlan tags */
9665         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9666         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9667             max_frm_size > HCLGE_MAC_MAX_FRAME)
9668                 return -EINVAL;
9669
9670         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9671         mutex_lock(&hdev->vport_lock);
9672         /* VF's mps must fit within hdev->mps */
9673         if (vport->vport_id && max_frm_size > hdev->mps) {
9674                 mutex_unlock(&hdev->vport_lock);
9675                 return -EINVAL;
9676         } else if (vport->vport_id) {
9677                 vport->mps = max_frm_size;
9678                 mutex_unlock(&hdev->vport_lock);
9679                 return 0;
9680         }
9681
9682         /* PF's mps must not be less than any VF's mps */
9683         for (i = 1; i < hdev->num_alloc_vport; i++)
9684                 if (max_frm_size < hdev->vport[i].mps) {
9685                         mutex_unlock(&hdev->vport_lock);
9686                         return -EINVAL;
9687                 }
9688
9689         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9690
9691         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9692         if (ret) {
9693                 dev_err(&hdev->pdev->dev,
9694                         "failed to change MTU, ret = %d\n", ret);
9695                 goto out;
9696         }
9697
9698         hdev->mps = max_frm_size;
9699         vport->mps = max_frm_size;
9700
9701         ret = hclge_buffer_alloc(hdev);
9702         if (ret)
9703                 dev_err(&hdev->pdev->dev,
9704                         "failed to allocate buffer, ret = %d\n", ret);
9705
9706 out:
9707         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9708         mutex_unlock(&hdev->vport_lock);
9709         return ret;
9710 }
9711
9712 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9713                                     bool enable)
9714 {
9715         struct hclge_reset_tqp_queue_cmd *req;
9716         struct hclge_desc desc;
9717         int ret;
9718
9719         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9720
9721         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9722         req->tqp_id = cpu_to_le16(queue_id);
9723         if (enable)
9724                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9725
9726         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9727         if (ret) {
9728                 dev_err(&hdev->pdev->dev,
9729                         "Send tqp reset cmd error, status =%d\n", ret);
9730                 return ret;
9731         }
9732
9733         return 0;
9734 }
9735
9736 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9737 {
9738         struct hclge_reset_tqp_queue_cmd *req;
9739         struct hclge_desc desc;
9740         int ret;
9741
9742         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9743
9744         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9745         req->tqp_id = cpu_to_le16(queue_id);
9746
9747         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9748         if (ret) {
9749                 dev_err(&hdev->pdev->dev,
9750                         "Get reset status error, status =%d\n", ret);
9751                 return ret;
9752         }
9753
9754         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9755 }
9756
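/* Convert a queue id that is local to the handle into the global TQP index
 * used by the hardware.
 */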
9757 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9758 {
9759         struct hnae3_queue *queue;
9760         struct hclge_tqp *tqp;
9761
9762         queue = handle->kinfo.tqp[queue_id];
9763         tqp = container_of(queue, struct hclge_tqp, q);
9764
9765         return tqp->index;
9766 }
9767
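/* Reset a single TQP: disable the queue, assert the TQP soft reset, poll the
 * reset status, and deassert the soft reset once the hardware reports it is
 * ready.
 */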
9768 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9769 {
9770         struct hclge_vport *vport = hclge_get_vport(handle);
9771         struct hclge_dev *hdev = vport->back;
9772         int reset_try_times = 0;
9773         int reset_status;
9774         u16 queue_gid;
9775         int ret;
9776
9777         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9778
9779         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9780         if (ret) {
9781                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9782                 return ret;
9783         }
9784
9785         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9786         if (ret) {
9787                 dev_err(&hdev->pdev->dev,
9788                         "Send reset tqp cmd fail, ret = %d\n", ret);
9789                 return ret;
9790         }
9791
9792         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9793                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9794                 if (reset_status)
9795                         break;
9796
9797                 /* Wait for tqp hw reset */
9798                 usleep_range(1000, 1200);
9799         }
9800
9801         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9802                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9803                 return -ETIME;
9804         }
9805
9806         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9807         if (ret)
9808                 dev_err(&hdev->pdev->dev,
9809                         "Deassert the soft reset fail, ret = %d\n", ret);
9810
9811         return ret;
9812 }
9813
9814 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9815 {
9816         struct hclge_dev *hdev = vport->back;
9817         int reset_try_times = 0;
9818         int reset_status;
9819         u16 queue_gid;
9820         int ret;
9821
9822         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9823
9824         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9825         if (ret) {
9826                 dev_warn(&hdev->pdev->dev,
9827                          "Send reset tqp cmd fail, ret = %d\n", ret);
9828                 return;
9829         }
9830
9831         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9832                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9833                 if (reset_status)
9834                         break;
9835
9836                 /* Wait for tqp hw reset */
9837                 usleep_range(1000, 1200);
9838         }
9839
9840         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9841                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9842                 return;
9843         }
9844
9845         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9846         if (ret)
9847                 dev_warn(&hdev->pdev->dev,
9848                          "Deassert the soft reset fail, ret = %d\n", ret);
9849 }
9850
9851 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9852 {
9853         struct hclge_vport *vport = hclge_get_vport(handle);
9854         struct hclge_dev *hdev = vport->back;
9855
9856         return hdev->fw_version;
9857 }
9858
9859 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9860 {
9861         struct phy_device *phydev = hdev->hw.mac.phydev;
9862
9863         if (!phydev)
9864                 return;
9865
9866         phy_set_asym_pause(phydev, rx_en, tx_en);
9867 }
9868
9869 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9870 {
9871         int ret;
9872
9873         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9874                 return 0;
9875
9876         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9877         if (ret)
9878                 dev_err(&hdev->pdev->dev,
9879                         "configure pauseparam error, ret = %d.\n", ret);
9880
9881         return ret;
9882 }
9883
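/* Resolve the MAC pause configuration from the local and link-partner
 * autoneg advertisements reported by the PHY and program it into hardware.
 */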
9884 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9885 {
9886         struct phy_device *phydev = hdev->hw.mac.phydev;
9887         u16 remote_advertising = 0;
9888         u16 local_advertising;
9889         u32 rx_pause, tx_pause;
9890         u8 flowctl;
9891
9892         if (!phydev->link || !phydev->autoneg)
9893                 return 0;
9894
9895         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9896
9897         if (phydev->pause)
9898                 remote_advertising = LPA_PAUSE_CAP;
9899
9900         if (phydev->asym_pause)
9901                 remote_advertising |= LPA_PAUSE_ASYM;
9902
9903         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9904                                            remote_advertising);
9905         tx_pause = flowctl & FLOW_CTRL_TX;
9906         rx_pause = flowctl & FLOW_CTRL_RX;
9907
9908         if (phydev->duplex == HCLGE_MAC_HALF) {
9909                 tx_pause = 0;
9910                 rx_pause = 0;
9911         }
9912
9913         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9914 }
9915
9916 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9917                                  u32 *rx_en, u32 *tx_en)
9918 {
9919         struct hclge_vport *vport = hclge_get_vport(handle);
9920         struct hclge_dev *hdev = vport->back;
9921         struct phy_device *phydev = hdev->hw.mac.phydev;
9922
9923         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9924
9925         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9926                 *rx_en = 0;
9927                 *tx_en = 0;
9928                 return;
9929         }
9930
9931         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9932                 *rx_en = 1;
9933                 *tx_en = 0;
9934         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9935                 *tx_en = 1;
9936                 *rx_en = 0;
9937         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9938                 *rx_en = 1;
9939                 *tx_en = 1;
9940         } else {
9941                 *rx_en = 0;
9942                 *tx_en = 0;
9943         }
9944 }
9945
9946 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9947                                          u32 rx_en, u32 tx_en)
9948 {
9949         if (rx_en && tx_en)
9950                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
9951         else if (rx_en && !tx_en)
9952                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9953         else if (!rx_en && tx_en)
9954                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9955         else
9956                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
9957
9958         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9959 }
9960
9961 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9962                                 u32 rx_en, u32 tx_en)
9963 {
9964         struct hclge_vport *vport = hclge_get_vport(handle);
9965         struct hclge_dev *hdev = vport->back;
9966         struct phy_device *phydev = hdev->hw.mac.phydev;
9967         u32 fc_autoneg;
9968
9969         if (phydev) {
9970                 fc_autoneg = hclge_get_autoneg(handle);
9971                 if (auto_neg != fc_autoneg) {
9972                         dev_info(&hdev->pdev->dev,
9973                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9974                         return -EOPNOTSUPP;
9975                 }
9976         }
9977
9978         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9979                 dev_info(&hdev->pdev->dev,
9980                          "Priority flow control enabled. Cannot set link flow control.\n");
9981                 return -EOPNOTSUPP;
9982         }
9983
9984         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9985
9986         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9987
9988         if (!auto_neg)
9989                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9990
9991         if (phydev)
9992                 return phy_start_aneg(phydev);
9993
9994         return -EOPNOTSUPP;
9995 }
9996
9997 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9998                                           u8 *auto_neg, u32 *speed, u8 *duplex)
9999 {
10000         struct hclge_vport *vport = hclge_get_vport(handle);
10001         struct hclge_dev *hdev = vport->back;
10002
10003         if (speed)
10004                 *speed = hdev->hw.mac.speed;
10005         if (duplex)
10006                 *duplex = hdev->hw.mac.duplex;
10007         if (auto_neg)
10008                 *auto_neg = hdev->hw.mac.autoneg;
10009 }
10010
10011 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10012                                  u8 *module_type)
10013 {
10014         struct hclge_vport *vport = hclge_get_vport(handle);
10015         struct hclge_dev *hdev = vport->back;
10016
10017         /* When the nic is down, the service task is not running and does not
10018          * update the port information every second. Query the port information
10019          * before returning the media type to ensure it is up to date.
10020          */
10021         hclge_update_port_info(hdev);
10022
10023         if (media_type)
10024                 *media_type = hdev->hw.mac.media_type;
10025
10026         if (module_type)
10027                 *module_type = hdev->hw.mac.module_type;
10028 }
10029
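/* Read the MDI/MDI-X control and status from the PHY by temporarily switching
 * to the MDIX register page, then restore the copper page.
 */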
10030 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10031                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10032 {
10033         struct hclge_vport *vport = hclge_get_vport(handle);
10034         struct hclge_dev *hdev = vport->back;
10035         struct phy_device *phydev = hdev->hw.mac.phydev;
10036         int mdix_ctrl, mdix, is_resolved;
10037         unsigned int retval;
10038
10039         if (!phydev) {
10040                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10041                 *tp_mdix = ETH_TP_MDI_INVALID;
10042                 return;
10043         }
10044
10045         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10046
10047         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10048         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10049                                     HCLGE_PHY_MDIX_CTRL_S);
10050
10051         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10052         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10053         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10054
10055         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10056
10057         switch (mdix_ctrl) {
10058         case 0x0:
10059                 *tp_mdix_ctrl = ETH_TP_MDI;
10060                 break;
10061         case 0x1:
10062                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10063                 break;
10064         case 0x3:
10065                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10066                 break;
10067         default:
10068                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10069                 break;
10070         }
10071
10072         if (!is_resolved)
10073                 *tp_mdix = ETH_TP_MDI_INVALID;
10074         else if (mdix)
10075                 *tp_mdix = ETH_TP_MDI_X;
10076         else
10077                 *tp_mdix = ETH_TP_MDI;
10078 }
10079
10080 static void hclge_info_show(struct hclge_dev *hdev)
10081 {
10082         struct device *dev = &hdev->pdev->dev;
10083
10084         dev_info(dev, "PF info begin:\n");
10085
10086         dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
10087         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10088         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10089         dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
10090         dev_info(dev, "Number of vmdq vports: %u\n", hdev->num_vmdq_vport);
10091         dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
10092         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10093         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10094         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10095         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10096         dev_info(dev, "This is %s PF\n",
10097                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10098         dev_info(dev, "DCB %s\n",
10099                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10100         dev_info(dev, "MQPRIO %s\n",
10101                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10102
10103         dev_info(dev, "PF info end.\n");
10104 }
10105
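/* Initialize the NIC client instance for a vport. If a reset happens while
 * the instance is being set up (detected via the reset counter or the reset
 * handling state), the instance is torn down again and -EBUSY is returned.
 */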
10106 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10107                                           struct hclge_vport *vport)
10108 {
10109         struct hnae3_client *client = vport->nic.client;
10110         struct hclge_dev *hdev = ae_dev->priv;
10111         int rst_cnt = hdev->rst_stats.reset_cnt;
10112         int ret;
10113
10114         ret = client->ops->init_instance(&vport->nic);
10115         if (ret)
10116                 return ret;
10117
10118         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10119         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10120             rst_cnt != hdev->rst_stats.reset_cnt) {
10121                 ret = -EBUSY;
10122                 goto init_nic_err;
10123         }
10124
10125         /* Enable nic hw error interrupts */
10126         ret = hclge_config_nic_hw_error(hdev, true);
10127         if (ret) {
10128                 dev_err(&ae_dev->pdev->dev,
10129                         "fail(%d) to enable hw error interrupts\n", ret);
10130                 goto init_nic_err;
10131         }
10132
10133         hnae3_set_client_init_flag(client, ae_dev, 1);
10134
10135         if (netif_msg_drv(&hdev->vport->nic))
10136                 hclge_info_show(hdev);
10137
10138         return ret;
10139
10140 init_nic_err:
10141         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10142         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10143                 msleep(HCLGE_WAIT_RESET_DONE);
10144
10145         client->ops->uninit_instance(&vport->nic, 0);
10146
10147         return ret;
10148 }
10149
10150 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10151                                            struct hclge_vport *vport)
10152 {
10153         struct hclge_dev *hdev = ae_dev->priv;
10154         struct hnae3_client *client;
10155         int rst_cnt;
10156         int ret;
10157
10158         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10159             !hdev->nic_client)
10160                 return 0;
10161
10162         client = hdev->roce_client;
10163         ret = hclge_init_roce_base_info(vport);
10164         if (ret)
10165                 return ret;
10166
10167         rst_cnt = hdev->rst_stats.reset_cnt;
10168         ret = client->ops->init_instance(&vport->roce);
10169         if (ret)
10170                 return ret;
10171
10172         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10173         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10174             rst_cnt != hdev->rst_stats.reset_cnt) {
10175                 ret = -EBUSY;
10176                 goto init_roce_err;
10177         }
10178
10179         /* Enable roce ras interrupts */
10180         ret = hclge_config_rocee_ras_interrupt(hdev, true);
10181         if (ret) {
10182                 dev_err(&ae_dev->pdev->dev,
10183                         "fail(%d) to enable roce ras interrupts\n", ret);
10184                 goto init_roce_err;
10185         }
10186
10187         hnae3_set_client_init_flag(client, ae_dev, 1);
10188
10189         return 0;
10190
10191 init_roce_err:
10192         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10193         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10194                 msleep(HCLGE_WAIT_RESET_DONE);
10195
10196         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10197
10198         return ret;
10199 }
10200
10201 static int hclge_init_client_instance(struct hnae3_client *client,
10202                                       struct hnae3_ae_dev *ae_dev)
10203 {
10204         struct hclge_dev *hdev = ae_dev->priv;
10205         struct hclge_vport *vport;
10206         int i, ret;
10207
10208         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10209                 vport = &hdev->vport[i];
10210
10211                 switch (client->type) {
10212                 case HNAE3_CLIENT_KNIC:
10213                         hdev->nic_client = client;
10214                         vport->nic.client = client;
10215                         ret = hclge_init_nic_client_instance(ae_dev, vport);
10216                         if (ret)
10217                                 goto clear_nic;
10218
10219                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10220                         if (ret)
10221                                 goto clear_roce;
10222
10223                         break;
10224                 case HNAE3_CLIENT_ROCE:
10225                         if (hnae3_dev_roce_supported(hdev)) {
10226                                 hdev->roce_client = client;
10227                                 vport->roce.client = client;
10228                         }
10229
10230                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10231                         if (ret)
10232                                 goto clear_roce;
10233
10234                         break;
10235                 default:
10236                         return -EINVAL;
10237                 }
10238         }
10239
10240         return 0;
10241
10242 clear_nic:
10243         hdev->nic_client = NULL;
10244         vport->nic.client = NULL;
10245         return ret;
10246 clear_roce:
10247         hdev->roce_client = NULL;
10248         vport->roce.client = NULL;
10249         return ret;
10250 }
10251
10252 static void hclge_uninit_client_instance(struct hnae3_client *client,
10253                                          struct hnae3_ae_dev *ae_dev)
10254 {
10255         struct hclge_dev *hdev = ae_dev->priv;
10256         struct hclge_vport *vport;
10257         int i;
10258
10259         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10260                 vport = &hdev->vport[i];
10261                 if (hdev->roce_client) {
10262                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10263                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10264                                 msleep(HCLGE_WAIT_RESET_DONE);
10265
10266                         hdev->roce_client->ops->uninit_instance(&vport->roce,
10267                                                                 0);
10268                         hdev->roce_client = NULL;
10269                         vport->roce.client = NULL;
10270                 }
10271                 if (client->type == HNAE3_CLIENT_ROCE)
10272                         return;
10273                 if (hdev->nic_client && client->ops->uninit_instance) {
10274                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10275                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10276                                 msleep(HCLGE_WAIT_RESET_DONE);
10277
10278                         client->ops->uninit_instance(&vport->nic, 0);
10279                         hdev->nic_client = NULL;
10280                         vport->nic.client = NULL;
10281                 }
10282         }
10283 }
10284
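/* Map the optional device memory BAR. Devices without this BAR simply skip
 * the mapping and return success.
 */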
10285 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10286 {
10287 #define HCLGE_MEM_BAR           4
10288
10289         struct pci_dev *pdev = hdev->pdev;
10290         struct hclge_hw *hw = &hdev->hw;
10291
10292         /* if the device does not have device memory, return directly */
10293         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10294                 return 0;
10295
10296         hw->mem_base = devm_ioremap_wc(&pdev->dev,
10297                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
10298                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
10299         if (!hw->mem_base) {
10300                 dev_err(&pdev->dev, "failed to map device memory\n");
10301                 return -EFAULT;
10302         }
10303
10304         return 0;
10305 }
10306
10307 static int hclge_pci_init(struct hclge_dev *hdev)
10308 {
10309         struct pci_dev *pdev = hdev->pdev;
10310         struct hclge_hw *hw;
10311         int ret;
10312
10313         ret = pci_enable_device(pdev);
10314         if (ret) {
10315                 dev_err(&pdev->dev, "failed to enable PCI device\n");
10316                 return ret;
10317         }
10318
10319         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10320         if (ret) {
10321                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10322                 if (ret) {
10323                         dev_err(&pdev->dev,
10324                                 "can't set consistent PCI DMA");
10325                         goto err_disable_device;
10326                 }
10327                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10328         }
10329
10330         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10331         if (ret) {
10332                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10333                 goto err_disable_device;
10334         }
10335
10336         pci_set_master(pdev);
10337         hw = &hdev->hw;
10338         hw->io_base = pcim_iomap(pdev, 2, 0);
10339         if (!hw->io_base) {
10340                 dev_err(&pdev->dev, "Can't map configuration register space\n");
10341                 ret = -ENOMEM;
10342                 goto err_clr_master;
10343         }
10344
10345         ret = hclge_dev_mem_map(hdev);
10346         if (ret)
10347                 goto err_unmap_io_base;
10348
10349         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10350
10351         return 0;
10352
10353 err_unmap_io_base:
10354         pcim_iounmap(pdev, hdev->hw.io_base);
10355 err_clr_master:
10356         pci_clear_master(pdev);
10357         pci_release_regions(pdev);
10358 err_disable_device:
10359         pci_disable_device(pdev);
10360
10361         return ret;
10362 }
10363
10364 static void hclge_pci_uninit(struct hclge_dev *hdev)
10365 {
10366         struct pci_dev *pdev = hdev->pdev;
10367
10368         if (hdev->hw.mem_base)
10369                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10370
10371         pcim_iounmap(pdev, hdev->hw.io_base);
10372         pci_free_irq_vectors(pdev);
10373         pci_clear_master(pdev);
10374         pci_release_mem_regions(pdev);
10375         pci_disable_device(pdev);
10376 }
10377
10378 static void hclge_state_init(struct hclge_dev *hdev)
10379 {
10380         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10381         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10382         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10383         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10384         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10385         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10386         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10387 }
10388
10389 static void hclge_state_uninit(struct hclge_dev *hdev)
10390 {
10391         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10392         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10393
10394         if (hdev->reset_timer.function)
10395                 del_timer_sync(&hdev->reset_timer);
10396         if (hdev->service_task.work.func)
10397                 cancel_delayed_work_sync(&hdev->service_task);
10398 }
10399
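/* Prepare the device for an FLR: take the reset semaphore, quiesce the
 * function, and retry the preparation if it fails or another reset is still
 * pending; then disable the misc vector and mark the command queue as
 * disabled until hclge_flr_done() rebuilds the device.
 */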
10400 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10401 {
10402 #define HCLGE_FLR_RETRY_WAIT_MS 500
10403 #define HCLGE_FLR_RETRY_CNT     5
10404
10405         struct hclge_dev *hdev = ae_dev->priv;
10406         int retry_cnt = 0;
10407         int ret;
10408
10409 retry:
10410         down(&hdev->reset_sem);
10411         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10412         hdev->reset_type = HNAE3_FLR_RESET;
10413         ret = hclge_reset_prepare(hdev);
10414         if (ret || hdev->reset_pending) {
10415                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10416                         ret);
10417                 if (hdev->reset_pending ||
10418                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10419                         dev_err(&hdev->pdev->dev,
10420                                 "reset_pending:0x%lx, retry_cnt:%d\n",
10421                                 hdev->reset_pending, retry_cnt);
10422                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10423                         up(&hdev->reset_sem);
10424                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
10425                         goto retry;
10426                 }
10427         }
10428
10429         /* disable misc vector before FLR done */
10430         hclge_enable_vector(&hdev->misc_vector, false);
10431         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10432         hdev->rst_stats.flr_rst_cnt++;
10433 }
10434
10435 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10436 {
10437         struct hclge_dev *hdev = ae_dev->priv;
10438         int ret;
10439
10440         hclge_enable_vector(&hdev->misc_vector, true);
10441
10442         ret = hclge_reset_rebuild(hdev);
10443         if (ret)
10444                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10445
10446         hdev->reset_type = HNAE3_NONE_RESET;
10447         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10448         up(&hdev->reset_sem);
10449 }
10450
10451 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10452 {
10453         u16 i;
10454
10455         for (i = 0; i < hdev->num_alloc_vport; i++) {
10456                 struct hclge_vport *vport = &hdev->vport[i];
10457                 int ret;
10458
10459                  /* Send cmd to clear VF's FUNC_RST_ING */
10460                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10461                 if (ret)
10462                         dev_warn(&hdev->pdev->dev,
10463                                  "clear vf(%u) rst failed %d!\n",
10464                                  vport->vport_id, ret);
10465         }
10466 }
10467
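/* Probe-time initialization of the PF: map the PCI resources, bring up the
 * command queue, query capabilities, and initialize MSI, TQPs, vports,
 * MAC/MDIO, VLAN, TM, RSS and the flow director before scheduling the
 * service task.
 */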
10468 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10469 {
10470         struct pci_dev *pdev = ae_dev->pdev;
10471         struct hclge_dev *hdev;
10472         int ret;
10473
10474         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10475         if (!hdev)
10476                 return -ENOMEM;
10477
10478         hdev->pdev = pdev;
10479         hdev->ae_dev = ae_dev;
10480         hdev->reset_type = HNAE3_NONE_RESET;
10481         hdev->reset_level = HNAE3_FUNC_RESET;
10482         ae_dev->priv = hdev;
10483
10484         /* HW supports 2 layers of VLAN tags */
10485         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10486
10487         mutex_init(&hdev->vport_lock);
10488         spin_lock_init(&hdev->fd_rule_lock);
10489         sema_init(&hdev->reset_sem, 1);
10490
10491         ret = hclge_pci_init(hdev);
10492         if (ret)
10493                 goto out;
10494
10495         /* Initialize the firmware command queue */
10496         ret = hclge_cmd_queue_init(hdev);
10497         if (ret)
10498                 goto err_pci_uninit;
10499
10500         /* Initialize the firmware command */
10501         ret = hclge_cmd_init(hdev);
10502         if (ret)
10503                 goto err_cmd_uninit;
10504
10505         ret = hclge_get_cap(hdev);
10506         if (ret)
10507                 goto err_cmd_uninit;
10508
10509         ret = hclge_query_dev_specs(hdev);
10510         if (ret) {
10511                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10512                         ret);
10513                 goto err_cmd_uninit;
10514         }
10515
10516         ret = hclge_configure(hdev);
10517         if (ret) {
10518                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10519                 goto err_cmd_uninit;
10520         }
10521
10522         ret = hclge_init_msi(hdev);
10523         if (ret) {
10524                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10525                 goto err_cmd_uninit;
10526         }
10527
10528         ret = hclge_misc_irq_init(hdev);
10529         if (ret)
10530                 goto err_msi_uninit;
10531
10532         ret = hclge_alloc_tqps(hdev);
10533         if (ret) {
10534                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10535                 goto err_msi_irq_uninit;
10536         }
10537
10538         ret = hclge_alloc_vport(hdev);
10539         if (ret)
10540                 goto err_msi_irq_uninit;
10541
10542         ret = hclge_map_tqp(hdev);
10543         if (ret)
10544                 goto err_msi_irq_uninit;
10545
10546         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10547                 ret = hclge_mac_mdio_config(hdev);
10548                 if (ret)
10549                         goto err_msi_irq_uninit;
10550         }
10551
10552         ret = hclge_init_umv_space(hdev);
10553         if (ret)
10554                 goto err_mdiobus_unreg;
10555
10556         ret = hclge_mac_init(hdev);
10557         if (ret) {
10558                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10559                 goto err_mdiobus_unreg;
10560         }
10561
10562         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10563         if (ret) {
10564                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10565                 goto err_mdiobus_unreg;
10566         }
10567
10568         ret = hclge_config_gro(hdev, true);
10569         if (ret)
10570                 goto err_mdiobus_unreg;
10571
10572         ret = hclge_init_vlan_config(hdev);
10573         if (ret) {
10574                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10575                 goto err_mdiobus_unreg;
10576         }
10577
10578         ret = hclge_tm_schd_init(hdev);
10579         if (ret) {
10580                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10581                 goto err_mdiobus_unreg;
10582         }
10583
10584         hclge_rss_init_cfg(hdev);
10585         ret = hclge_rss_init_hw(hdev);
10586         if (ret) {
10587                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10588                 goto err_mdiobus_unreg;
10589         }
10590
10591         ret = init_mgr_tbl(hdev);
10592         if (ret) {
10593                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10594                 goto err_mdiobus_unreg;
10595         }
10596
10597         ret = hclge_init_fd_config(hdev);
10598         if (ret) {
10599                 dev_err(&pdev->dev,
10600                         "fd table init fail, ret=%d\n", ret);
10601                 goto err_mdiobus_unreg;
10602         }
10603
10604         INIT_KFIFO(hdev->mac_tnl_log);
10605
10606         hclge_dcb_ops_set(hdev);
10607
10608         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10609         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10610
10611         /* Set up affinity after the service timer is set up because
10612          * add_timer_on is called in the affinity notify callback.
10613          */
10614         hclge_misc_affinity_setup(hdev);
10615
10616         hclge_clear_all_event_cause(hdev);
10617         hclge_clear_resetting_state(hdev);
10618
10619         /* Log and clear the hw errors that have already occurred */
10620         hclge_handle_all_hns_hw_errors(ae_dev);
10621
10622         /* Request a delayed reset for error recovery: an immediate global
10623          * reset on this PF could affect the pending initialization of other PFs.
10624          */
10625         if (ae_dev->hw_err_reset_req) {
10626                 enum hnae3_reset_type reset_level;
10627
10628                 reset_level = hclge_get_reset_level(ae_dev,
10629                                                     &ae_dev->hw_err_reset_req);
10630                 hclge_set_def_reset_request(ae_dev, reset_level);
10631                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10632         }
10633
10634         /* Enable MISC vector(vector0) */
10635         hclge_enable_vector(&hdev->misc_vector, true);
10636
10637         hclge_state_init(hdev);
10638         hdev->last_reset_time = jiffies;
10639
10640         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10641                  HCLGE_DRIVER_NAME);
10642
10643         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10644
10645         return 0;
10646
10647 err_mdiobus_unreg:
10648         if (hdev->hw.mac.phydev)
10649                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10650 err_msi_irq_uninit:
10651         hclge_misc_irq_uninit(hdev);
10652 err_msi_uninit:
10653         pci_free_irq_vectors(pdev);
10654 err_cmd_uninit:
10655         hclge_cmd_uninit(hdev);
10656 err_pci_uninit:
10657         pcim_iounmap(pdev, hdev->hw.io_base);
10658         pci_clear_master(pdev);
10659         pci_release_regions(pdev);
10660         pci_disable_device(pdev);
10661 out:
10662         mutex_destroy(&hdev->vport_lock);
10663         return ret;
10664 }
10665
10666 static void hclge_stats_clear(struct hclge_dev *hdev)
10667 {
10668         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10669 }
10670
10671 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10672 {
10673         return hclge_config_switch_param(hdev, vf, enable,
10674                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10675 }
10676
10677 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10678 {
10679         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10680                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10681                                           enable, vf);
10682 }
10683
10684 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10685 {
10686         int ret;
10687
10688         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10689         if (ret) {
10690                 dev_err(&hdev->pdev->dev,
10691                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10692                         vf, enable ? "on" : "off", ret);
10693                 return ret;
10694         }
10695
10696         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10697         if (ret)
10698                 dev_err(&hdev->pdev->dev,
10699                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10700                         vf, enable ? "on" : "off", ret);
10701
10702         return ret;
10703 }
10704
10705 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10706                                  bool enable)
10707 {
10708         struct hclge_vport *vport = hclge_get_vport(handle);
10709         struct hclge_dev *hdev = vport->back;
10710         u32 new_spoofchk = enable ? 1 : 0;
10711         int ret;
10712
10713         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10714                 return -EOPNOTSUPP;
10715
10716         vport = hclge_get_vf_vport(hdev, vf);
10717         if (!vport)
10718                 return -EINVAL;
10719
10720         if (vport->vf_info.spoofchk == new_spoofchk)
10721                 return 0;
10722
10723         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10724                 dev_warn(&hdev->pdev->dev,
10725                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10726                          vf);
10727         else if (enable && hclge_is_umv_space_full(vport, true))
10728                 dev_warn(&hdev->pdev->dev,
10729                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10730                          vf);
10731
10732         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10733         if (ret)
10734                 return ret;
10735
10736         vport->vf_info.spoofchk = new_spoofchk;
10737         return 0;
10738 }
10739
10740 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10741 {
10742         struct hclge_vport *vport = hdev->vport;
10743         int ret;
10744         int i;
10745
10746         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10747                 return 0;
10748
10749         /* resume the vf spoof check state after reset */
10750         for (i = 0; i < hdev->num_alloc_vport; i++) {
10751                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10752                                                vport->vf_info.spoofchk);
10753                 if (ret)
10754                         return ret;
10755
10756                 vport++;
10757         }
10758
10759         return 0;
10760 }
10761
10762 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10763 {
10764         struct hclge_vport *vport = hclge_get_vport(handle);
10765         struct hclge_dev *hdev = vport->back;
10766         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10767         u32 new_trusted = enable ? 1 : 0;
10768         bool en_bc_pmc;
10769         int ret;
10770
10771         vport = hclge_get_vf_vport(hdev, vf);
10772         if (!vport)
10773                 return -EINVAL;
10774
10775         if (vport->vf_info.trusted == new_trusted)
10776                 return 0;
10777
10778         /* Disable promisc mode for VF if it is not trusted any more. */
10779         if (!enable && vport->vf_info.promisc_enable) {
10780                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10781                 ret = hclge_set_vport_promisc_mode(vport, false, false,
10782                                                    en_bc_pmc);
10783                 if (ret)
10784                         return ret;
10785                 vport->vf_info.promisc_enable = 0;
10786                 hclge_inform_vf_promisc_info(vport);
10787         }
10788
10789         vport->vf_info.trusted = new_trusted;
10790
10791         return 0;
10792 }
10793
10794 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10795 {
10796         int ret;
10797         int vf;
10798
10799         /* reset vf rate to default value */
10800         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10801                 struct hclge_vport *vport = &hdev->vport[vf];
10802
10803                 vport->vf_info.max_tx_rate = 0;
10804                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10805                 if (ret)
10806                         dev_err(&hdev->pdev->dev,
10807                                 "vf%d failed to reset to default, ret=%d\n",
10808                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10809         }
10810 }
10811
10812 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10813                                      int min_tx_rate, int max_tx_rate)
10814 {
10815         if (min_tx_rate != 0 ||
10816             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10817                 dev_err(&hdev->pdev->dev,
10818                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10819                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10820                 return -EINVAL;
10821         }
10822
10823         return 0;
10824 }
10825
10826 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10827                              int min_tx_rate, int max_tx_rate, bool force)
10828 {
10829         struct hclge_vport *vport = hclge_get_vport(handle);
10830         struct hclge_dev *hdev = vport->back;
10831         int ret;
10832
10833         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10834         if (ret)
10835                 return ret;
10836
10837         vport = hclge_get_vf_vport(hdev, vf);
10838         if (!vport)
10839                 return -EINVAL;
10840
10841         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10842                 return 0;
10843
10844         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10845         if (ret)
10846                 return ret;
10847
10848         vport->vf_info.max_tx_rate = max_tx_rate;
10849
10850         return 0;
10851 }
10852
10853 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10854 {
10855         struct hnae3_handle *handle = &hdev->vport->nic;
10856         struct hclge_vport *vport;
10857         int ret;
10858         int vf;
10859
10860         /* resume the vf max_tx_rate after reset */
10861         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10862                 vport = hclge_get_vf_vport(hdev, vf);
10863                 if (!vport)
10864                         return -EINVAL;
10865
10866                 /* zero means max rate; after reset, the firmware has already set
10867                  * it to the max rate, so just continue.
10868                  */
10869                 if (!vport->vf_info.max_tx_rate)
10870                         continue;
10871
10872                 ret = hclge_set_vf_rate(handle, vf, 0,
10873                                         vport->vf_info.max_tx_rate, true);
10874                 if (ret) {
10875                         dev_err(&hdev->pdev->dev,
10876                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10877                                 vf, vport->vf_info.max_tx_rate, ret);
10878                         return ret;
10879                 }
10880         }
10881
10882         return 0;
10883 }
10884
10885 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10886 {
10887         struct hclge_vport *vport = hdev->vport;
10888         int i;
10889
10890         for (i = 0; i < hdev->num_alloc_vport; i++) {
10891                 hclge_vport_stop(vport);
10892                 vport++;
10893         }
10894 }
10895
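/* Rebuild the hardware state after a reset: reinitialize the command queue,
 * TQP mapping, MAC, VLAN, TM, RSS, manager and flow director tables, then
 * re-enable the hardware error interrupts and restore per-VF settings.
 */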
10896 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10897 {
10898         struct hclge_dev *hdev = ae_dev->priv;
10899         struct pci_dev *pdev = ae_dev->pdev;
10900         int ret;
10901
10902         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10903
10904         hclge_stats_clear(hdev);
10905         /* NOTE: a PF reset does not need to clear or restore the PF and VF
10906          * table entries, so the tables in memory should not be cleaned here.
10907          */
10908         if (hdev->reset_type == HNAE3_IMP_RESET ||
10909             hdev->reset_type == HNAE3_GLOBAL_RESET) {
10910                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10911                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10912                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10913                 hclge_reset_umv_space(hdev);
10914         }
10915
10916         ret = hclge_cmd_init(hdev);
10917         if (ret) {
10918                 dev_err(&pdev->dev, "Cmd queue init failed\n");
10919                 return ret;
10920         }
10921
10922         ret = hclge_map_tqp(hdev);
10923         if (ret) {
10924                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10925                 return ret;
10926         }
10927
10928         ret = hclge_mac_init(hdev);
10929         if (ret) {
10930                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10931                 return ret;
10932         }
10933
10934         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10935         if (ret) {
10936                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10937                 return ret;
10938         }
10939
10940         ret = hclge_config_gro(hdev, true);
10941         if (ret)
10942                 return ret;
10943
10944         ret = hclge_init_vlan_config(hdev);
10945         if (ret) {
10946                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10947                 return ret;
10948         }
10949
10950         ret = hclge_tm_init_hw(hdev, true);
10951         if (ret) {
10952                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10953                 return ret;
10954         }
10955
10956         ret = hclge_rss_init_hw(hdev);
10957         if (ret) {
10958                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10959                 return ret;
10960         }
10961
10962         ret = init_mgr_tbl(hdev);
10963         if (ret) {
10964                 dev_err(&pdev->dev,
10965                         "failed to reinit manager table, ret = %d\n", ret);
10966                 return ret;
10967         }
10968
10969         ret = hclge_init_fd_config(hdev);
10970         if (ret) {
10971                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10972                 return ret;
10973         }
10974
10975         /* Log and clear the hw errors that have already occurred */
10976         hclge_handle_all_hns_hw_errors(ae_dev);
10977
10978         /* Re-enable the hw error interrupts because
10979          * they are disabled by a global reset.
10980          */
10981         ret = hclge_config_nic_hw_error(hdev, true);
10982         if (ret) {
10983                 dev_err(&pdev->dev,
10984                         "fail(%d) to re-enable NIC hw error interrupts\n",
10985                         ret);
10986                 return ret;
10987         }
10988
10989         if (hdev->roce_client) {
10990                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
10991                 if (ret) {
10992                         dev_err(&pdev->dev,
10993                                 "fail(%d) to re-enable roce ras interrupts\n",
10994                                 ret);
10995                         return ret;
10996                 }
10997         }
10998
10999         hclge_reset_vport_state(hdev);
11000         ret = hclge_reset_vport_spoofchk(hdev);
11001         if (ret)
11002                 return ret;
11003
11004         ret = hclge_resume_vf_rate(hdev);
11005         if (ret)
11006                 return ret;
11007
11008         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11009                  HCLGE_DRIVER_NAME);
11010
11011         return 0;
11012 }
11013
11014 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11015 {
11016         struct hclge_dev *hdev = ae_dev->priv;
11017         struct hclge_mac *mac = &hdev->hw.mac;
11018
11019         hclge_reset_vf_rate(hdev);
11020         hclge_clear_vf_vlan(hdev);
11021         hclge_misc_affinity_teardown(hdev);
11022         hclge_state_uninit(hdev);
11023         hclge_uninit_mac_table(hdev);
11024
11025         if (mac->phydev)
11026                 mdiobus_unregister(mac->mdio_bus);
11027
11028         /* Disable MISC vector(vector0) */
11029         hclge_enable_vector(&hdev->misc_vector, false);
11030         synchronize_irq(hdev->misc_vector.vector_irq);
11031
11032         /* Disable all hw interrupts */
11033         hclge_config_mac_tnl_int(hdev, false);
11034         hclge_config_nic_hw_error(hdev, false);
11035         hclge_config_rocee_ras_interrupt(hdev, false);
11036
11037         hclge_cmd_uninit(hdev);
11038         hclge_misc_irq_uninit(hdev);
11039         hclge_pci_uninit(hdev);
11040         mutex_destroy(&hdev->vport_lock);
11041         hclge_uninit_vport_vlan_table(hdev);
11042         ae_dev->priv = NULL;
11043 }
11044
11045 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11046 {
11047         struct hclge_vport *vport = hclge_get_vport(handle);
11048         struct hclge_dev *hdev = vport->back;
11049
11050         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11051 }
11052
11053 static void hclge_get_channels(struct hnae3_handle *handle,
11054                                struct ethtool_channels *ch)
11055 {
11056         ch->max_combined = hclge_get_max_channels(handle);
11057         ch->other_count = 1;
11058         ch->max_other = 1;
11059         ch->combined_count = handle->kinfo.rss_size;
11060 }
11061
11062 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11063                                         u16 *alloc_tqps, u16 *max_rss_size)
11064 {
11065         struct hclge_vport *vport = hclge_get_vport(handle);
11066         struct hclge_dev *hdev = vport->back;
11067
11068         *alloc_tqps = vport->alloc_tqps;
11069         *max_rss_size = hdev->pf_rss_size_max;
11070 }
11071
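/* Change the number of enabled queue pairs. The TM vport mapping is updated
 * first, then the RSS TC mode is reprogrammed for the new rss_size, and the
 * RSS indirection table is rebuilt unless the user has configured it.
 */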
11072 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11073                               bool rxfh_configured)
11074 {
11075         struct hclge_vport *vport = hclge_get_vport(handle);
11076         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11077         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11078         struct hclge_dev *hdev = vport->back;
11079         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11080         u16 cur_rss_size = kinfo->rss_size;
11081         u16 cur_tqps = kinfo->num_tqps;
11082         u16 tc_valid[HCLGE_MAX_TC_NUM];
11083         u16 roundup_size;
11084         u32 *rss_indir;
11085         unsigned int i;
11086         int ret;
11087
11088         kinfo->req_rss_size = new_tqps_num;
11089
11090         ret = hclge_tm_vport_map_update(hdev);
11091         if (ret) {
11092                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11093                 return ret;
11094         }
11095
11096         roundup_size = roundup_pow_of_two(kinfo->rss_size);
11097         roundup_size = ilog2(roundup_size);
11098         /* Set the RSS TC mode according to the new RSS size */
11099         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11100                 tc_valid[i] = 0;
11101
11102                 if (!(hdev->hw_tc_map & BIT(i)))
11103                         continue;
11104
11105                 tc_valid[i] = 1;
11106                 tc_size[i] = roundup_size;
11107                 tc_offset[i] = kinfo->rss_size * i;
11108         }
11109         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11110         if (ret)
11111                 return ret;
11112
11113         /* RSS indirection table has been configured by the user */
11114         if (rxfh_configured)
11115                 goto out;
11116
11117         /* Reinitialize the RSS indirection table according to the new RSS size */
11118         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
11119         if (!rss_indir)
11120                 return -ENOMEM;
11121
11122         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
11123                 rss_indir[i] = i % kinfo->rss_size;
11124
11125         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11126         if (ret)
11127                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11128                         ret);
11129
11130         kfree(rss_indir);
11131
11132 out:
11133         if (!ret)
11134                 dev_info(&hdev->pdev->dev,
11135                          "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11136                          cur_rss_size, kinfo->rss_size,
11137                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11138
11139         return ret;
11140 }
11141
11142 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11143                               u32 *regs_num_64_bit)
11144 {
11145         struct hclge_desc desc;
11146         u32 total_num;
11147         int ret;
11148
11149         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11150         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11151         if (ret) {
11152                 dev_err(&hdev->pdev->dev,
11153                         "Query register number cmd failed, ret = %d.\n", ret);
11154                 return ret;
11155         }
11156
11157         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11158         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11159
11160         total_num = *regs_num_32_bit + *regs_num_64_bit;
11161         if (!total_num)
11162                 return -EINVAL;
11163
11164         return 0;
11165 }
11166
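/* Read regs_num 32-bit registers into the caller's buffer. The firmware
 * returns the values packed into command descriptors; the first descriptor
 * carries HCLGE_32_BIT_DESC_NODATA_LEN fewer values than the others.
 */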
11167 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11168                                  void *data)
11169 {
11170 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11171 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11172
11173         struct hclge_desc *desc;
11174         u32 *reg_val = data;
11175         __le32 *desc_data;
11176         int nodata_num;
11177         int cmd_num;
11178         int i, k, n;
11179         int ret;
11180
11181         if (regs_num == 0)
11182                 return 0;
11183
11184         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11185         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11186                                HCLGE_32_BIT_REG_RTN_DATANUM);
11187         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11188         if (!desc)
11189                 return -ENOMEM;
11190
11191         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11192         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11193         if (ret) {
11194                 dev_err(&hdev->pdev->dev,
11195                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
11196                 kfree(desc);
11197                 return ret;
11198         }
11199
11200         for (i = 0; i < cmd_num; i++) {
11201                 if (i == 0) {
11202                         desc_data = (__le32 *)(&desc[i].data[0]);
11203                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11204                 } else {
11205                         desc_data = (__le32 *)(&desc[i]);
11206                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
11207                 }
11208                 for (k = 0; k < n; k++) {
11209                         *reg_val++ = le32_to_cpu(*desc_data++);
11210
11211                         regs_num--;
11212                         if (!regs_num)
11213                                 break;
11214                 }
11215         }
11216
11217         kfree(desc);
11218         return 0;
11219 }
11220
11221 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11222                                  void *data)
11223 {
11224 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11225 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11226
11227         struct hclge_desc *desc;
11228         u64 *reg_val = data;
11229         __le64 *desc_data;
11230         int nodata_len;
11231         int cmd_num;
11232         int i, k, n;
11233         int ret;
11234
11235         if (regs_num == 0)
11236                 return 0;
11237
11238         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11239         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11240                                HCLGE_64_BIT_REG_RTN_DATANUM);
11241         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11242         if (!desc)
11243                 return -ENOMEM;
11244
11245         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11246         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11247         if (ret) {
11248                 dev_err(&hdev->pdev->dev,
11249                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
11250                 kfree(desc);
11251                 return ret;
11252         }
11253
11254         for (i = 0; i < cmd_num; i++) {
11255                 if (i == 0) {
11256                         desc_data = (__le64 *)(&desc[i].data[0]);
11257                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11258                 } else {
11259                         desc_data = (__le64 *)(&desc[i]);
11260                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
11261                 }
11262                 for (k = 0; k < n; k++) {
11263                         *reg_val++ = le64_to_cpu(*desc_data++);
11264
11265                         regs_num--;
11266                         if (!regs_num)
11267                                 break;
11268                 }
11269         }
11270
11271         kfree(desc);
11272         return 0;
11273 }
11274
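/* Layout of the register dump: registers are emitted in lines of
 * REG_NUM_PER_LINE u32 words, and each register block is padded with
 * SEPARATOR_VALUE words so that the next block starts on a new line.
 */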
11275 #define MAX_SEPARATE_NUM        4
11276 #define SEPARATOR_VALUE         0xFDFCFBFA
11277 #define REG_NUM_PER_LINE        4
11278 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
11279 #define REG_SEPARATOR_LINE      1
11280 #define REG_NUM_REMAIN_MASK     3
11281 #define BD_LIST_MAX_NUM         30
11282
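/* Query the number of command BDs needed for each DFX register type, using a
 * chain of HCLGE_GET_DFX_REG_TYPE_CNT linked descriptors.
 */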
11283 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11284 {
11285         int i;
11286
11287         /* initialize command BDs except the last one */
11288         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11289                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11290                                            true);
11291                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11292         }
11293
11294         /* initialize the last command BD */
11295         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11296
11297         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11298 }
11299
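/* Query the DFX BD numbers and extract the per-type BD count for each entry
 * of hclge_dfx_bd_offset_list.
 */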
11300 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11301                                     int *bd_num_list,
11302                                     u32 type_num)
11303 {
11304         u32 entries_per_desc, desc_index, index, offset, i;
11305         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11306         int ret;
11307
11308         ret = hclge_query_bd_num_cmd_send(hdev, desc);
11309         if (ret) {
11310                 dev_err(&hdev->pdev->dev,
11311                         "Get dfx bd num fail, status is %d.\n", ret);
11312                 return ret;
11313         }
11314
11315         entries_per_desc = ARRAY_SIZE(desc[0].data);
11316         for (i = 0; i < type_num; i++) {
11317                 offset = hclge_dfx_bd_offset_list[i];
11318                 index = offset % entries_per_desc;
11319                 desc_index = offset / entries_per_desc;
11320                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11321         }
11322
11323         return ret;
11324 }
11325
11326 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11327                                   struct hclge_desc *desc_src, int bd_num,
11328                                   enum hclge_opcode_type cmd)
11329 {
11330         struct hclge_desc *desc = desc_src;
11331         int i, ret;
11332
11333         hclge_cmd_setup_basic_desc(desc, cmd, true);
11334         for (i = 0; i < bd_num - 1; i++) {
11335                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11336                 desc++;
11337                 hclge_cmd_setup_basic_desc(desc, cmd, true);
11338         }
11339
11340         desc = desc_src;
11341         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11342         if (ret)
11343                 dev_err(&hdev->pdev->dev,
11344                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11345                         cmd, ret);
11346
11347         return ret;
11348 }
11349
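/* Copy register values from the command descriptors into the output buffer
 * and pad the last line with SEPARATOR_VALUE words; returns the number of
 * u32 words written.
 */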
11350 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11351                                     void *data)
11352 {
11353         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11354         struct hclge_desc *desc = desc_src;
11355         u32 *reg = data;
11356
11357         entries_per_desc = ARRAY_SIZE(desc->data);
11358         reg_num = entries_per_desc * bd_num;
11359         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11360         for (i = 0; i < reg_num; i++) {
11361                 index = i % entries_per_desc;
11362                 desc_index = i / entries_per_desc;
11363                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11364         }
11365         for (i = 0; i < separator_num; i++)
11366                 *reg++ = SEPARATOR_VALUE;
11367
11368         return reg_num + separator_num;
11369 }
11370
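/* Calculate the total DFX register dump length in bytes: the data of each
 * register type is padded out to whole REG_LEN_PER_LINE lines (a full
 * separator line is added when the data already fills whole lines).
 */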
11371 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11372 {
11373         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11374         int data_len_per_desc, bd_num, i;
11375         int bd_num_list[BD_LIST_MAX_NUM];
11376         u32 data_len;
11377         int ret;
11378
11379         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11380         if (ret) {
11381                 dev_err(&hdev->pdev->dev,
11382                         "Get dfx reg bd num fail, status is %d.\n", ret);
11383                 return ret;
11384         }
11385
11386         data_len_per_desc = sizeof_field(struct hclge_desc, data);
11387         *len = 0;
11388         for (i = 0; i < dfx_reg_type_num; i++) {
11389                 bd_num = bd_num_list[i];
11390                 data_len = data_len_per_desc * bd_num;
11391                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11392         }
11393
11394         return ret;
11395 }
11396
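/* Read all DFX registers into 'data', one register type at a time, reusing a
 * descriptor buffer sized for the largest BD count.
 */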
11397 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11398 {
11399         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11400         int bd_num, bd_num_max, buf_len, i;
11401         int bd_num_list[BD_LIST_MAX_NUM];
11402         struct hclge_desc *desc_src;
11403         u32 *reg = data;
11404         int ret;
11405
11406         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11407         if (ret) {
11408                 dev_err(&hdev->pdev->dev,
11409                         "Get dfx reg bd num fail, status is %d.\n", ret);
11410                 return ret;
11411         }
11412
11413         bd_num_max = bd_num_list[0];
11414         for (i = 1; i < dfx_reg_type_num; i++)
11415                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11416
11417         buf_len = sizeof(*desc_src) * bd_num_max;
11418         desc_src = kzalloc(buf_len, GFP_KERNEL);
11419         if (!desc_src)
11420                 return -ENOMEM;
11421
11422         for (i = 0; i < dfx_reg_type_num; i++) {
11423                 bd_num = bd_num_list[i];
11424                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11425                                              hclge_dfx_reg_opcode_list[i]);
11426                 if (ret) {
11427                         dev_err(&hdev->pdev->dev,
11428                                 "Get dfx reg fail, status is %d.\n", ret);
11429                         break;
11430                 }
11431
11432                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11433         }
11434
11435         kfree(desc_src);
11436         return ret;
11437 }
11438
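/* Read the directly accessible PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) from PCIe register space into 'data';
 * returns the number of u32 words written, including separator padding.
 */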
11439 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11440                               struct hnae3_knic_private_info *kinfo)
11441 {
11442 #define HCLGE_RING_REG_OFFSET           0x200
11443 #define HCLGE_RING_INT_REG_OFFSET       0x4
11444
11445         int i, j, reg_num, separator_num;
11446         int data_num_sum;
11447         u32 *reg = data;
11448
11449         /* fetch per-PF register values from the PF PCIe register space */
11450         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11451         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11452         for (i = 0; i < reg_num; i++)
11453                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11454         for (i = 0; i < separator_num; i++)
11455                 *reg++ = SEPARATOR_VALUE;
11456         data_num_sum = reg_num + separator_num;
11457
11458         reg_num = ARRAY_SIZE(common_reg_addr_list);
11459         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11460         for (i = 0; i < reg_num; i++)
11461                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11462         for (i = 0; i < separator_num; i++)
11463                 *reg++ = SEPARATOR_VALUE;
11464         data_num_sum += reg_num + separator_num;
11465
11466         reg_num = ARRAY_SIZE(ring_reg_addr_list);
11467         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11468         for (j = 0; j < kinfo->num_tqps; j++) {
11469                 for (i = 0; i < reg_num; i++)
11470                         *reg++ = hclge_read_dev(&hdev->hw,
11471                                                 ring_reg_addr_list[i] +
11472                                                 HCLGE_RING_REG_OFFSET * j);
11473                 for (i = 0; i < separator_num; i++)
11474                         *reg++ = SEPARATOR_VALUE;
11475         }
11476         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11477
11478         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11479         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11480         for (j = 0; j < hdev->num_msi_used - 1; j++) {
11481                 for (i = 0; i < reg_num; i++)
11482                         *reg++ = hclge_read_dev(&hdev->hw,
11483                                                 tqp_intr_reg_addr_list[i] +
11484                                                 HCLGE_RING_INT_REG_OFFSET * j);
11485                 for (i = 0; i < separator_num; i++)
11486                         *reg++ = SEPARATOR_VALUE;
11487         }
11488         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11489
11490         return data_num_sum;
11491 }
11492
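/* Return the size in bytes of the buffer needed by hclge_get_regs(): direct
 * PF registers, 32/64 bit firmware registers and the DFX registers, all
 * rounded up to whole REG_LEN_PER_LINE lines.
 */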
11493 static int hclge_get_regs_len(struct hnae3_handle *handle)
11494 {
11495         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11496         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11497         struct hclge_vport *vport = hclge_get_vport(handle);
11498         struct hclge_dev *hdev = vport->back;
11499         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11500         int regs_lines_32_bit, regs_lines_64_bit;
11501         int ret;
11502
11503         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11504         if (ret) {
11505                 dev_err(&hdev->pdev->dev,
11506                         "Get register number failed, ret = %d.\n", ret);
11507                 return ret;
11508         }
11509
11510         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11511         if (ret) {
11512                 dev_err(&hdev->pdev->dev,
11513                         "Get dfx reg len failed, ret = %d.\n", ret);
11514                 return ret;
11515         }
11516
11517         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11518                 REG_SEPARATOR_LINE;
11519         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11520                 REG_SEPARATOR_LINE;
11521         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11522                 REG_SEPARATOR_LINE;
11523         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11524                 REG_SEPARATOR_LINE;
11525         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11526                 REG_SEPARATOR_LINE;
11527         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11528                 REG_SEPARATOR_LINE;
11529
11530         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11531                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11532                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11533 }
11534
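/* Dump all registers into 'data': direct PF registers first, then the 32 bit,
 * 64 bit and DFX registers queried from firmware.
 */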
11535 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11536                            void *data)
11537 {
11538         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11539         struct hclge_vport *vport = hclge_get_vport(handle);
11540         struct hclge_dev *hdev = vport->back;
11541         u32 regs_num_32_bit, regs_num_64_bit;
11542         int i, reg_num, separator_num, ret;
11543         u32 *reg = data;
11544
11545         *version = hdev->fw_version;
11546
11547         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11548         if (ret) {
11549                 dev_err(&hdev->pdev->dev,
11550                         "Get register number failed, ret = %d.\n", ret);
11551                 return;
11552         }
11553
11554         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11555
11556         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11557         if (ret) {
11558                 dev_err(&hdev->pdev->dev,
11559                         "Get 32 bit register failed, ret = %d.\n", ret);
11560                 return;
11561         }
11562         reg_num = regs_num_32_bit;
11563         reg += reg_num;
11564         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11565         for (i = 0; i < separator_num; i++)
11566                 *reg++ = SEPARATOR_VALUE;
11567
11568         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11569         if (ret) {
11570                 dev_err(&hdev->pdev->dev,
11571                         "Get 64 bit register failed, ret = %d.\n", ret);
11572                 return;
11573         }
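        /* each 64 bit register occupies two u32 entries in the dump buffer */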
11574         reg_num = regs_num_64_bit * 2;
11575         reg += reg_num;
11576         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11577         for (i = 0; i < separator_num; i++)
11578                 *reg++ = SEPARATOR_VALUE;
11579
11580         ret = hclge_get_dfx_reg(hdev, reg);
11581         if (ret)
11582                 dev_err(&hdev->pdev->dev,
11583                         "Get dfx register failed, ret = %d.\n", ret);
11584 }
11585
11586 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11587 {
11588         struct hclge_set_led_state_cmd *req;
11589         struct hclge_desc desc;
11590         int ret;
11591
11592         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11593
11594         req = (struct hclge_set_led_state_cmd *)desc.data;
11595         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11596                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11597
11598         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11599         if (ret)
11600                 dev_err(&hdev->pdev->dev,
11601                         "Send set led state cmd error, ret =%d\n", ret);
11602
11603         return ret;
11604 }
11605
11606 enum hclge_led_status {
11607         HCLGE_LED_OFF,
11608         HCLGE_LED_ON,
11609         HCLGE_LED_NO_CHANGE = 0xFF,
11610 };
11611
11612 static int hclge_set_led_id(struct hnae3_handle *handle,
11613                             enum ethtool_phys_id_state status)
11614 {
11615         struct hclge_vport *vport = hclge_get_vport(handle);
11616         struct hclge_dev *hdev = vport->back;
11617
11618         switch (status) {
11619         case ETHTOOL_ID_ACTIVE:
11620                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11621         case ETHTOOL_ID_INACTIVE:
11622                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11623         default:
11624                 return -EINVAL;
11625         }
11626 }
11627
11628 static void hclge_get_link_mode(struct hnae3_handle *handle,
11629                                 unsigned long *supported,
11630                                 unsigned long *advertising)
11631 {
11632         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11633         struct hclge_vport *vport = hclge_get_vport(handle);
11634         struct hclge_dev *hdev = vport->back;
11635         unsigned int idx = 0;
11636
11637         for (; idx < size; idx++) {
11638                 supported[idx] = hdev->hw.mac.supported[idx];
11639                 advertising[idx] = hdev->hw.mac.advertising[idx];
11640         }
11641 }
11642
11643 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11644 {
11645         struct hclge_vport *vport = hclge_get_vport(handle);
11646         struct hclge_dev *hdev = vport->back;
11647
11648         return hclge_config_gro(hdev, enable);
11649 }
11650
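/* Re-apply the PF promiscuous mode configuration when the promisc overflow
 * flags have changed or a previous update is still pending.
 */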
11651 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11652 {
11653         struct hclge_vport *vport = &hdev->vport[0];
11654         struct hnae3_handle *handle = &vport->nic;
11655         u8 tmp_flags;
11656         int ret;
11657
11658         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11659                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11660                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11661         }
11662
11663         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11664                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11665                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11666                                              tmp_flags & HNAE3_MPE);
11667                 if (!ret) {
11668                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11669                         hclge_enable_vlan_filter(handle,
11670                                                  tmp_flags & HNAE3_VLAN_FLTR);
11671                 }
11672         }
11673 }
11674
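/* Check whether an SFP module is currently present by querying the firmware. */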
11675 static bool hclge_module_existed(struct hclge_dev *hdev)
11676 {
11677         struct hclge_desc desc;
11678         u32 existed;
11679         int ret;
11680
11681         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11682         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11683         if (ret) {
11684                 dev_err(&hdev->pdev->dev,
11685                         "failed to get SFP exist state, ret = %d\n", ret);
11686                 return false;
11687         }
11688
11689         existed = le32_to_cpu(desc.data[0]);
11690
11691         return existed != 0;
11692 }
11693
11694 /* need 6 bds (total 140 bytes) in one read,
11695  * return the number of bytes actually read, 0 means read failed.
11696  */
11697 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11698                                      u32 len, u8 *data)
11699 {
11700         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11701         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11702         u16 read_len;
11703         u16 copy_len;
11704         int ret;
11705         int i;
11706
11707         /* setup all 6 bds to read module eeprom info. */
11708         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11709                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11710                                            true);
11711
11712                 /* bd0~bd4 need next flag */
11713                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11714                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11715         }
11716
11717         /* setup bd0, this bd contains offset and read length. */
11718         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11719         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11720         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11721         sfp_info_bd0->read_len = cpu_to_le16(read_len);
11722
11723         ret = hclge_cmd_send(&hdev->hw, desc, i);
11724         if (ret) {
11725                 dev_err(&hdev->pdev->dev,
11726                         "failed to get SFP eeprom info, ret = %d\n", ret);
11727                 return 0;
11728         }
11729
11730         /* copy sfp info from bd0 to out buffer. */
11731         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11732         memcpy(data, sfp_info_bd0->data, copy_len);
11733         read_len = copy_len;
11734
11735         /* copy sfp info from bd1~bd5 to out buffer if needed. */
11736         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11737                 if (read_len >= len)
11738                         return read_len;
11739
11740                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11741                 memcpy(data + read_len, desc[i].data, copy_len);
11742                 read_len += copy_len;
11743         }
11744
11745         return read_len;
11746 }
11747
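/* Read 'len' bytes of module eeprom data starting at 'offset', in chunks of
 * up to HCLGE_SFP_INFO_MAX_LEN bytes per command; only supported on fiber
 * media.
 */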
11748 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11749                                    u32 len, u8 *data)
11750 {
11751         struct hclge_vport *vport = hclge_get_vport(handle);
11752         struct hclge_dev *hdev = vport->back;
11753         u32 read_len = 0;
11754         u16 data_len;
11755
11756         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11757                 return -EOPNOTSUPP;
11758
11759         if (!hclge_module_existed(hdev))
11760                 return -ENXIO;
11761
11762         while (read_len < len) {
11763                 data_len = hclge_get_sfp_eeprom_info(hdev,
11764                                                      offset + read_len,
11765                                                      len - read_len,
11766                                                      data + read_len);
11767                 if (!data_len)
11768                         return -EIO;
11769
11770                 read_len += data_len;
11771         }
11772
11773         return 0;
11774 }
11775
11776 static const struct hnae3_ae_ops hclge_ops = {
11777         .init_ae_dev = hclge_init_ae_dev,
11778         .uninit_ae_dev = hclge_uninit_ae_dev,
11779         .flr_prepare = hclge_flr_prepare,
11780         .flr_done = hclge_flr_done,
11781         .init_client_instance = hclge_init_client_instance,
11782         .uninit_client_instance = hclge_uninit_client_instance,
11783         .map_ring_to_vector = hclge_map_ring_to_vector,
11784         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11785         .get_vector = hclge_get_vector,
11786         .put_vector = hclge_put_vector,
11787         .set_promisc_mode = hclge_set_promisc_mode,
11788         .request_update_promisc_mode = hclge_request_update_promisc_mode,
11789         .set_loopback = hclge_set_loopback,
11790         .start = hclge_ae_start,
11791         .stop = hclge_ae_stop,
11792         .client_start = hclge_client_start,
11793         .client_stop = hclge_client_stop,
11794         .get_status = hclge_get_status,
11795         .get_ksettings_an_result = hclge_get_ksettings_an_result,
11796         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11797         .get_media_type = hclge_get_media_type,
11798         .check_port_speed = hclge_check_port_speed,
11799         .get_fec = hclge_get_fec,
11800         .set_fec = hclge_set_fec,
11801         .get_rss_key_size = hclge_get_rss_key_size,
11802         .get_rss_indir_size = hclge_get_rss_indir_size,
11803         .get_rss = hclge_get_rss,
11804         .set_rss = hclge_set_rss,
11805         .set_rss_tuple = hclge_set_rss_tuple,
11806         .get_rss_tuple = hclge_get_rss_tuple,
11807         .get_tc_size = hclge_get_tc_size,
11808         .get_mac_addr = hclge_get_mac_addr,
11809         .set_mac_addr = hclge_set_mac_addr,
11810         .do_ioctl = hclge_do_ioctl,
11811         .add_uc_addr = hclge_add_uc_addr,
11812         .rm_uc_addr = hclge_rm_uc_addr,
11813         .add_mc_addr = hclge_add_mc_addr,
11814         .rm_mc_addr = hclge_rm_mc_addr,
11815         .set_autoneg = hclge_set_autoneg,
11816         .get_autoneg = hclge_get_autoneg,
11817         .restart_autoneg = hclge_restart_autoneg,
11818         .halt_autoneg = hclge_halt_autoneg,
11819         .get_pauseparam = hclge_get_pauseparam,
11820         .set_pauseparam = hclge_set_pauseparam,
11821         .set_mtu = hclge_set_mtu,
11822         .reset_queue = hclge_reset_tqp,
11823         .get_stats = hclge_get_stats,
11824         .get_mac_stats = hclge_get_mac_stat,
11825         .update_stats = hclge_update_stats,
11826         .get_strings = hclge_get_strings,
11827         .get_sset_count = hclge_get_sset_count,
11828         .get_fw_version = hclge_get_fw_version,
11829         .get_mdix_mode = hclge_get_mdix_mode,
11830         .enable_vlan_filter = hclge_enable_vlan_filter,
11831         .set_vlan_filter = hclge_set_vlan_filter,
11832         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11833         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11834         .reset_event = hclge_reset_event,
11835         .get_reset_level = hclge_get_reset_level,
11836         .set_default_reset_request = hclge_set_def_reset_request,
11837         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11838         .set_channels = hclge_set_channels,
11839         .get_channels = hclge_get_channels,
11840         .get_regs_len = hclge_get_regs_len,
11841         .get_regs = hclge_get_regs,
11842         .set_led_id = hclge_set_led_id,
11843         .get_link_mode = hclge_get_link_mode,
11844         .add_fd_entry = hclge_add_fd_entry,
11845         .del_fd_entry = hclge_del_fd_entry,
11846         .del_all_fd_entries = hclge_del_all_fd_entries,
11847         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11848         .get_fd_rule_info = hclge_get_fd_rule_info,
11849         .get_fd_all_rules = hclge_get_all_rules,
11850         .enable_fd = hclge_enable_fd,
11851         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11852         .dbg_run_cmd = hclge_dbg_run_cmd,
11853         .handle_hw_ras_error = hclge_handle_hw_ras_error,
11854         .get_hw_reset_stat = hclge_get_hw_reset_stat,
11855         .ae_dev_resetting = hclge_ae_dev_resetting,
11856         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11857         .set_gro_en = hclge_gro_en,
11858         .get_global_queue_id = hclge_covert_handle_qid_global,
11859         .set_timer_task = hclge_set_timer_task,
11860         .mac_connect_phy = hclge_mac_connect_phy,
11861         .mac_disconnect_phy = hclge_mac_disconnect_phy,
11862         .get_vf_config = hclge_get_vf_config,
11863         .set_vf_link_state = hclge_set_vf_link_state,
11864         .set_vf_spoofchk = hclge_set_vf_spoofchk,
11865         .set_vf_trust = hclge_set_vf_trust,
11866         .set_vf_rate = hclge_set_vf_rate,
11867         .set_vf_mac = hclge_set_vf_mac,
11868         .get_module_eeprom = hclge_get_module_eeprom,
11869         .get_cmdq_stat = hclge_get_cmdq_stat,
11870         .add_cls_flower = hclge_add_cls_flower,
11871         .del_cls_flower = hclge_del_cls_flower,
11872         .cls_flower_active = hclge_is_cls_flower_active,
11873 };
11874
11875 static struct hnae3_ae_algo ae_algo = {
11876         .ops = &hclge_ops,
11877         .pdev_id_table = ae_algo_pci_tbl,
11878 };
11879
11880 static int hclge_init(void)
11881 {
11882         pr_info("%s is initializing\n", HCLGE_NAME);
11883
11884         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11885         if (!hclge_wq) {
11886                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11887                 return -ENOMEM;
11888         }
11889
11890         hnae3_register_ae_algo(&ae_algo);
11891
11892         return 0;
11893 }
11894
11895 static void hclge_exit(void)
11896 {
11897         hnae3_unregister_ae_algo(&ae_algo);
11898         destroy_workqueue(hclge_wq);
11899 }
11900 module_init(hclge_init);
11901 module_exit(hclge_exit);
11902
11903 MODULE_LICENSE("GPL");
11904 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11905 MODULE_DESCRIPTION("HCLGE Driver");
11906 MODULE_VERSION(HCLGE_MOD_VERSION);