drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME                      "hclge"
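/* Read one u64 counter at byte offset @offset inside the stats struct @p,
 * and get a field's byte offset within struct hclge_mac_stats.
 */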
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT     256U
#define HCLGE_BUF_MUL_BY        2
#define HCLGE_BUF_DIV_BY        2
#define NEED_RESERVE_TC_NUM     2
#define BUF_MAX_PERCENT         100
#define BUF_RESERVE_PERCENT     90

#define HCLGE_RESET_MAX_FAIL_CNT        5
#define HCLGE_RESET_SYNC_TIME           100
#define HCLGE_PF_RESET_SYNC_TIME        20
#define HCLGE_PF_RESET_SYNC_CNT         1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

#define HCLGE_LINK_STATUS_MS    10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
                                                   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
                                         HCLGE_CMDQ_TX_ADDR_H_REG,
                                         HCLGE_CMDQ_TX_DEPTH_REG,
                                         HCLGE_CMDQ_TX_TAIL_REG,
                                         HCLGE_CMDQ_TX_HEAD_REG,
                                         HCLGE_CMDQ_RX_ADDR_L_REG,
                                         HCLGE_CMDQ_RX_ADDR_H_REG,
                                         HCLGE_CMDQ_RX_DEPTH_REG,
                                         HCLGE_CMDQ_RX_TAIL_REG,
                                         HCLGE_CMDQ_RX_HEAD_REG,
                                         HCLGE_VECTOR0_CMDQ_SRC_REG,
                                         HCLGE_CMDQ_INTR_STS_REG,
                                         HCLGE_CMDQ_INTR_EN_REG,
                                         HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
                                           HCLGE_VECTOR0_OTER_EN_REG,
                                           HCLGE_MISC_RESET_STS_REG,
                                           HCLGE_MISC_VECTOR_INT_STS,
                                           HCLGE_GLOBAL_RESET_REG,
                                           HCLGE_FUN_RST_ING,
                                           HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
                                         HCLGE_RING_RX_ADDR_H_REG,
                                         HCLGE_RING_RX_BD_NUM_REG,
                                         HCLGE_RING_RX_BD_LENGTH_REG,
                                         HCLGE_RING_RX_MERGE_EN_REG,
                                         HCLGE_RING_RX_TAIL_REG,
                                         HCLGE_RING_RX_HEAD_REG,
                                         HCLGE_RING_RX_FBD_NUM_REG,
                                         HCLGE_RING_RX_OFFSET_REG,
                                         HCLGE_RING_RX_FBD_OFFSET_REG,
                                         HCLGE_RING_RX_STASH_REG,
                                         HCLGE_RING_RX_BD_ERR_REG,
                                         HCLGE_RING_TX_ADDR_L_REG,
                                         HCLGE_RING_TX_ADDR_H_REG,
                                         HCLGE_RING_TX_BD_NUM_REG,
                                         HCLGE_RING_TX_PRIORITY_REG,
                                         HCLGE_RING_TX_TC_REG,
                                         HCLGE_RING_TX_MERGE_EN_REG,
                                         HCLGE_RING_TX_TAIL_REG,
                                         HCLGE_RING_TX_HEAD_REG,
                                         HCLGE_RING_TX_FBD_NUM_REG,
                                         HCLGE_RING_TX_OFFSET_REG,
                                         HCLGE_RING_TX_EBD_NUM_REG,
                                         HCLGE_RING_TX_EBD_OFFSET_REG,
                                         HCLGE_RING_TX_BD_ERR_REG,
                                         HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
                                             HCLGE_TQP_INTR_GL0_REG,
                                             HCLGE_TQP_INTR_GL1_REG,
                                             HCLGE_TQP_INTR_GL2_REG,
                                             HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
        "App    Loopback test",
        "Serdes serial Loopback test",
        "Serdes parallel Loopback test",
        "Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
        {"mac_tx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
        {"mac_rx_mac_pause_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
        {"mac_tx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
        {"mac_rx_control_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
        {"mac_tx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
        {"mac_tx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
        {"mac_tx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
        {"mac_tx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
        {"mac_tx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
        {"mac_tx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
        {"mac_tx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
        {"mac_tx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
        {"mac_tx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
        {"mac_rx_pfc_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
        {"mac_rx_pfc_pri0_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
        {"mac_rx_pfc_pri1_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
        {"mac_rx_pfc_pri2_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
        {"mac_rx_pfc_pri3_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
        {"mac_rx_pfc_pri4_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
        {"mac_rx_pfc_pri5_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
        {"mac_rx_pfc_pri6_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
        {"mac_rx_pfc_pri7_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
        {"mac_tx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
        {"mac_tx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
        {"mac_tx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
        {"mac_tx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
        {"mac_tx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
        {"mac_tx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
        {"mac_tx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
        {"mac_tx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
        {"mac_tx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
        {"mac_tx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
        {"mac_tx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
        {"mac_tx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
        {"mac_tx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
        {"mac_tx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
        {"mac_tx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
        {"mac_tx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
        {"mac_tx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
        {"mac_tx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
        {"mac_tx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
        {"mac_tx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
        {"mac_tx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
        {"mac_tx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
        {"mac_tx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
        {"mac_tx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
        {"mac_tx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
        {"mac_rx_total_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
        {"mac_rx_total_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
        {"mac_rx_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
        {"mac_rx_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
        {"mac_rx_good_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
        {"mac_rx_bad_oct_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
        {"mac_rx_uni_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
        {"mac_rx_multi_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
        {"mac_rx_broad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
        {"mac_rx_undersize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
        {"mac_rx_oversize_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
        {"mac_rx_64_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
        {"mac_rx_65_127_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
        {"mac_rx_128_255_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
        {"mac_rx_256_511_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
        {"mac_rx_512_1023_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
        {"mac_rx_1024_1518_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
        {"mac_rx_1519_2047_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
        {"mac_rx_2048_4095_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
        {"mac_rx_4096_8191_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
        {"mac_rx_8192_9216_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
        {"mac_rx_9217_12287_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
        {"mac_rx_12288_16383_oct_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
        {"mac_rx_1519_max_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
        {"mac_rx_1519_max_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

        {"mac_tx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
        {"mac_tx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
        {"mac_tx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
        {"mac_tx_err_all_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
        {"mac_tx_from_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
        {"mac_tx_from_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
        {"mac_rx_fragment_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
        {"mac_rx_undermin_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
        {"mac_rx_jabber_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
        {"mac_rx_fcs_err_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
        {"mac_rx_send_app_good_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
        {"mac_rx_send_app_bad_pkt_num",
                HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
        {
                .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
                .ethter_type = cpu_to_le16(ETH_P_LLDP),
                .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
                .i_port_bitmap = 0x1,
        },
};

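/* Default RSS hash key; this is the widely used standard Toeplitz key. */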
static const u8 hclge_hash_key[] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
        HCLGE_DFX_BIOS_BD_OFFSET,
        HCLGE_DFX_SSU_0_BD_OFFSET,
        HCLGE_DFX_SSU_1_BD_OFFSET,
        HCLGE_DFX_IGU_BD_OFFSET,
        HCLGE_DFX_RPU_0_BD_OFFSET,
        HCLGE_DFX_RPU_1_BD_OFFSET,
        HCLGE_DFX_NCSI_BD_OFFSET,
        HCLGE_DFX_RTC_BD_OFFSET,
        HCLGE_DFX_PPP_BD_OFFSET,
        HCLGE_DFX_RCB_BD_OFFSET,
        HCLGE_DFX_TQP_BD_OFFSET,
        HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
        HCLGE_OPC_DFX_BIOS_COMMON_REG,
        HCLGE_OPC_DFX_SSU_REG_0,
        HCLGE_OPC_DFX_SSU_REG_1,
        HCLGE_OPC_DFX_IGU_EGU_REG,
        HCLGE_OPC_DFX_RPU_REG_0,
        HCLGE_OPC_DFX_RPU_REG_1,
        HCLGE_OPC_DFX_NCSI_REG,
        HCLGE_OPC_DFX_RTC_REG,
        HCLGE_OPC_DFX_PPP_REG,
        HCLGE_OPC_DFX_RCB_REG,
        HCLGE_OPC_DFX_TQP_REG,
        HCLGE_OPC_DFX_SSU_REG_2
};

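/* Each entry is a { field id, field width in bits } pair used when
 * building flow director lookup keys.
 */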
static const struct key_info meta_data_key_info[] = {
        { PACKET_TYPE_ID, 6},
        { IP_FRAGEMENT, 1},
        { ROCE_TYPE, 1},
        { NEXT_KEY, 5},
        { VLAN_NUMBER, 2},
        { SRC_VPORT, 12},
        { DST_VPORT, 12},
        { TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
        { OUTER_DST_MAC, 48},
        { OUTER_SRC_MAC, 48},
        { OUTER_VLAN_TAG_FST, 16},
        { OUTER_VLAN_TAG_SEC, 16},
        { OUTER_ETH_TYPE, 16},
        { OUTER_L2_RSV, 16},
        { OUTER_IP_TOS, 8},
        { OUTER_IP_PROTO, 8},
        { OUTER_SRC_IP, 32},
        { OUTER_DST_IP, 32},
        { OUTER_L3_RSV, 16},
        { OUTER_SRC_PORT, 16},
        { OUTER_DST_PORT, 16},
        { OUTER_L4_RSV, 32},
        { OUTER_TUN_VNI, 24},
        { OUTER_TUN_FLOW_ID, 8},
        { INNER_DST_MAC, 48},
        { INNER_SRC_MAC, 48},
        { INNER_VLAN_TAG_FST, 16},
        { INNER_VLAN_TAG_SEC, 16},
        { INNER_ETH_TYPE, 16},
        { INNER_L2_RSV, 16},
        { INNER_IP_TOS, 8},
        { INNER_IP_PROTO, 8},
        { INNER_SRC_IP, 32},
        { INNER_DST_IP, 32},
        { INNER_L3_RSV, 16},
        { INNER_SRC_PORT, 16},
        { INNER_DST_PORT, 16},
        { INNER_L4_RSV, 32},
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
        __le64 *desc_data;
        int i, k, n;
        int ret;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "Get MAC pkt stats fail, status = %d.\n", ret);

                return ret;
        }

        for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
                /* for special opcode 0032, only the first desc has the head */
                if (unlikely(i == 0)) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
        u64 *data = (u64 *)(&hdev->mac_stats);
        struct hclge_desc *desc;
        __le64 *desc_data;
        u16 i, k, n;
        int ret;

        /* This may be called inside atomic sections,
         * so GFP_ATOMIC is more suitable here
         */
        desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
        if (!desc)
                return -ENOMEM;

        hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
        ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
        if (ret) {
                kfree(desc);
                return ret;
        }

        for (i = 0; i < desc_num; i++) {
                /* for special opcode 0034, only the first desc has the head */
                if (i == 0) {
                        desc_data = (__le64 *)(&desc[i].data[0]);
                        n = HCLGE_RD_FIRST_STATS_NUM;
                } else {
                        desc_data = (__le64 *)(&desc[i]);
                        n = HCLGE_RD_OTHER_STATS_NUM;
                }

                for (k = 0; k < n; k++) {
                        *data += le64_to_cpu(*desc_data);
                        data++;
                        desc_data++;
                }
        }

        kfree(desc);

        return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
        struct hclge_desc desc;
        __le32 *desc_data;
        u32 reg_num;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        desc_data = (__le32 *)(&desc.data[0]);
        reg_num = le32_to_cpu(*desc_data);

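        /* The first descriptor covers 3 register values and every
         * following descriptor covers 4, so this computes
         * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4).
         */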
        *desc_num = 1 + ((reg_num - 3) >> 2) +
                    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

        return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
        u32 desc_num;
        int ret;

        ret = hclge_mac_query_reg_num(hdev, &desc_num);

        /* The firmware supports the new statistics acquisition method */
        if (!ret)
                ret = hclge_mac_update_stats_complete(hdev, desc_num);
        else if (ret == -EOPNOTSUPP)
                ret = hclge_mac_update_stats_defective(hdev);
        else
                dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

        return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        struct hnae3_queue *queue;
        struct hclge_desc desc[1];
        struct hclge_tqp *tqp;
        int ret, i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_RX_STATS */
                hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                queue = handle->kinfo.tqp[i];
                tqp = container_of(queue, struct hclge_tqp, q);
                /* command : HCLGE_OPC_QUERY_TX_STATS */
                hclge_cmd_setup_basic_desc(&desc[0],
                                           HCLGE_OPC_QUERY_TX_STATS,
                                           true);

                desc[0].data[0] = cpu_to_le32(tqp->index);
                ret = hclge_cmd_send(&hdev->hw, desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "Query tqp stat fail, status = %d, queue = %d\n",
                                ret, i);
                        return ret;
                }
                tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
                        le32_to_cpu(desc[0].data[1]);
        }

        return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        struct hclge_tqp *tqp;
        u64 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
                *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
        }

        return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;

        /* each tqp has both a TX and an RX queue */
        return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
        struct hnae3_knic_private_info *kinfo = &handle->kinfo;
        u8 *buff = data;
        int i;

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        for (i = 0; i < kinfo->num_tqps; i++) {
                struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
                        struct hclge_tqp, q);
                snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
                         tqp->index);
                buff = buff + ETH_GSTRING_LEN;
        }

        return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
                                 const struct hclge_comm_stats_str strs[],
                                 int size, u64 *data)
{
        u64 *buf = data;
        u32 i;

        for (i = 0; i < size; i++)
                buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

        return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
                                  const struct hclge_comm_stats_str strs[],
                                  int size, u8 *data)
{
        char *buff = (char *)data;
        u32 i;

        if (stringset != ETH_SS_STATS)
                return buff;

        for (i = 0; i < size; i++) {
                snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
                buff = buff + ETH_GSTRING_LEN;
        }

        return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
        struct hnae3_handle *handle;
        int status;

        handle = &hdev->vport[0].nic;
        if (handle->client) {
                status = hclge_tqps_update_stats(handle);
                if (status) {
                        dev_err(&hdev->pdev->dev,
                                "Update TQPS stats fail, status = %d.\n",
                                status);
                }
        }

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
                               struct net_device_stats *net_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int status;

        if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
                return;

        status = hclge_mac_update_stats(hdev);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update MAC stats fail, status = %d.\n",
                        status);

        status = hclge_tqps_update_stats(handle);
        if (status)
                dev_err(&hdev->pdev->dev,
                        "Update TQPS stats fail, status = %d.\n",
                        status);

        clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
                HNAE3_SUPPORT_PHY_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
                HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int count = 0;

        /* Loopback test support rules:
         * mac: only GE mode supports it
         * serdes: all mac modes support it, including GE/XGE/LGE/CGE
         * phy: only supported when a phy device exists on the board
         */
        if (stringset == ETH_SS_TEST) {
                /* clear loopback bit flags at first */
                handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
                if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
                    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
                }

                count += 2;
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

                if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
                    hdev->hw.mac.phydev->drv->set_loopback) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }

        } else if (stringset == ETH_SS_STATS) {
                count = ARRAY_SIZE(g_mac_stats_string) +
                        hclge_tqps_get_sset_count(handle, stringset);
        }

        return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
                              u8 *data)
{
        u8 *p = (char *)data;
        int size;

        if (stringset == ETH_SS_STATS) {
                size = ARRAY_SIZE(g_mac_stats_string);
                p = hclge_comm_get_strings(stringset, g_mac_stats_string,
                                           size, p);
                p = hclge_tqps_get_strings(handle, p);
        } else if (stringset == ETH_SS_TEST) {
                if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
                        memcpy(p,
                               hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
                        memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
        }
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u64 *p;

        p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
                                 ARRAY_SIZE(g_mac_stats_string), data);
        p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
                               struct hns3_mac_stats *mac_stats)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;

        hclge_update_stats(handle, NULL);

        mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
        mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
                                   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK       0xF

        if (!(status->pf_state & HCLGE_PF_STATE_DONE))
                return -EINVAL;

        /* Set the pf to main pf */
        if (status->pf_state & HCLGE_PF_STATE_MAIN)
                hdev->flag |= HCLGE_FLAG_MAIN;
        else
                hdev->flag &= ~HCLGE_FLAG_MAIN;

        hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
        return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT     5

        struct hclge_func_status_cmd *req;
        struct hclge_desc desc;
        int timeout = 0;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
        req = (struct hclge_func_status_cmd *)desc.data;

        do {
                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "query function status failed %d.\n", ret);
                        return ret;
                }

                /* Check pf reset is done */
                if (req->pf_state)
                        break;
                usleep_range(1000, 2000);
        } while (timeout++ < HCLGE_QUERY_MAX_CNT);

        return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
        struct hclge_pf_res_cmd *req;
        struct hclge_desc desc;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "query pf resource failed %d.\n", ret);
                return ret;
        }

        req = (struct hclge_pf_res_cmd *)desc.data;
        hdev->num_tqps = le16_to_cpu(req->tqp_num) +
                         le16_to_cpu(req->ext_tqp_num);
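        /* Buffer sizes in the command response are in units of
         * (1 << HCLGE_BUF_UNIT_S) bytes.
         */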
        hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

        if (req->tx_buf_size)
                hdev->tx_buf_size =
                        le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

        hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

        if (req->dv_buf_size)
                hdev->dv_buf_size =
                        le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
        else
                hdev->dv_buf_size = HCLGE_DEFAULT_DV;

        hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

        hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
        if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
                dev_err(&hdev->pdev->dev,
                        "only %u msi resources available, not enough for pf(min:2).\n",
                        hdev->num_nic_msi);
                return -EINVAL;
        }

        if (hnae3_dev_roce_supported(hdev)) {
                hdev->num_roce_msi =
                        le16_to_cpu(req->pf_intr_vector_number_roce);

                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
                hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
        } else {
                hdev->num_msi = hdev->num_nic_msi;
        }

        return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
        switch (speed_cmd) {
        case 6:
                *speed = HCLGE_MAC_SPEED_10M;
                break;
        case 7:
                *speed = HCLGE_MAC_SPEED_100M;
                break;
        case 0:
                *speed = HCLGE_MAC_SPEED_1G;
                break;
        case 1:
                *speed = HCLGE_MAC_SPEED_10G;
                break;
        case 2:
                *speed = HCLGE_MAC_SPEED_25G;
                break;
        case 3:
                *speed = HCLGE_MAC_SPEED_40G;
                break;
        case 4:
                *speed = HCLGE_MAC_SPEED_50G;
                break;
        case 5:
                *speed = HCLGE_MAC_SPEED_100G;
                break;
        case 8:
                *speed = HCLGE_MAC_SPEED_200G;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        u32 speed_ability = hdev->hw.mac.speed_ability;
        u32 speed_bit = 0;

        switch (speed) {
        case HCLGE_MAC_SPEED_10M:
                speed_bit = HCLGE_SUPPORT_10M_BIT;
                break;
        case HCLGE_MAC_SPEED_100M:
                speed_bit = HCLGE_SUPPORT_100M_BIT;
                break;
        case HCLGE_MAC_SPEED_1G:
                speed_bit = HCLGE_SUPPORT_1G_BIT;
                break;
        case HCLGE_MAC_SPEED_10G:
                speed_bit = HCLGE_SUPPORT_10G_BIT;
                break;
        case HCLGE_MAC_SPEED_25G:
                speed_bit = HCLGE_SUPPORT_25G_BIT;
                break;
        case HCLGE_MAC_SPEED_40G:
                speed_bit = HCLGE_SUPPORT_40G_BIT;
                break;
        case HCLGE_MAC_SPEED_50G:
                speed_bit = HCLGE_SUPPORT_50G_BIT;
                break;
        case HCLGE_MAC_SPEED_100G:
                speed_bit = HCLGE_SUPPORT_100G_BIT;
                break;
        case HCLGE_MAC_SPEED_200G:
                speed_bit = HCLGE_SUPPORT_200G_BIT;
                break;
        default:
                return -EINVAL;
        }

        if (speed_bit & speed_ability)
                return 0;

        return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(
                        ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
                        mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
                                 mac->supported);
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
                                 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
        linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

        switch (mac->speed) {
        case HCLGE_MAC_SPEED_10G:
        case HCLGE_MAC_SPEED_40G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_25G:
        case HCLGE_MAC_SPEED_50G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
                                 mac->supported);
                mac->fec_ability =
                        BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
                        BIT(HNAE3_FEC_AUTO);
                break;
        case HCLGE_MAC_SPEED_100G:
        case HCLGE_MAC_SPEED_200G:
                linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
                mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
                break;
        default:
                mac->fec_ability = 0;
                break;
        }
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
                                        u16 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
                                 mac->supported);

        hclge_convert_setting_sr(mac, speed_ability);
        hclge_convert_setting_lr(mac, speed_ability);
        hclge_convert_setting_cr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);

        linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
                                            u16 speed_ability)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        hclge_convert_setting_kr(mac, speed_ability);
        if (hnae3_dev_fec_supported(hdev))
                hclge_convert_setting_fec(mac);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
                                         u16 speed_ability)
{
        unsigned long *supported = hdev->hw.mac.supported;

        /* default to support all speeds for GE port */
        if (!speed_ability)
                speed_ability = HCLGE_SUPPORT_GE;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                                 supported);

        if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                                 supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
                                 supported);
        }

        if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
        }

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
        u8 media_type = hdev->hw.mac.media_type;

        if (media_type == HNAE3_MEDIA_TYPE_FIBER)
                hclge_parse_fiber_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
                hclge_parse_copper_link_mode(hdev, speed_ability);
        else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
                hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
        if (speed_ability & HCLGE_SUPPORT_200G_BIT)
                return HCLGE_MAC_SPEED_200G;

        if (speed_ability & HCLGE_SUPPORT_100G_BIT)
                return HCLGE_MAC_SPEED_100G;

        if (speed_ability & HCLGE_SUPPORT_50G_BIT)
                return HCLGE_MAC_SPEED_50G;

        if (speed_ability & HCLGE_SUPPORT_40G_BIT)
                return HCLGE_MAC_SPEED_40G;

        if (speed_ability & HCLGE_SUPPORT_25G_BIT)
                return HCLGE_MAC_SPEED_25G;

        if (speed_ability & HCLGE_SUPPORT_10G_BIT)
                return HCLGE_MAC_SPEED_10G;

        if (speed_ability & HCLGE_SUPPORT_1G_BIT)
                return HCLGE_MAC_SPEED_1G;

        if (speed_ability & HCLGE_SUPPORT_100M_BIT)
                return HCLGE_MAC_SPEED_100M;

        if (speed_ability & HCLGE_SUPPORT_10M_BIT)
                return HCLGE_MAC_SPEED_10M;

        return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define SPEED_ABILITY_EXT_SHIFT                 8

        struct hclge_cfg_param_cmd *req;
        u64 mac_addr_tmp_high;
        u16 speed_ability_ext;
        u64 mac_addr_tmp;
        unsigned int i;

        req = (struct hclge_cfg_param_cmd *)desc[0].data;

        /* get the configuration */
        cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                              HCLGE_CFG_VMDQ_M,
                                              HCLGE_CFG_VMDQ_S);
        cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
        cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
                                            HCLGE_CFG_TQP_DESC_N_M,
                                            HCLGE_CFG_TQP_DESC_N_S);

        cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                        HCLGE_CFG_PHY_ADDR_M,
                                        HCLGE_CFG_PHY_ADDR_S);
        cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_MEDIA_TP_M,
                                          HCLGE_CFG_MEDIA_TP_S);
        cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                          HCLGE_CFG_RX_BUF_LEN_M,
                                          HCLGE_CFG_RX_BUF_LEN_S);
        /* get mac_address */
        mac_addr_tmp = __le32_to_cpu(req->param[2]);
        mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                            HCLGE_CFG_MAC_ADDR_H_M,
                                            HCLGE_CFG_MAC_ADDR_H_S);

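        /* Combine the high 16 bits with the low 32 bits to form the 48-bit
         * MAC address; the two-step shift avoids a single shift by 32.
         */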
        mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

        cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                             HCLGE_CFG_DEFAULT_SPEED_M,
                                             HCLGE_CFG_DEFAULT_SPEED_S);
        cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
                                               HCLGE_CFG_RSS_SIZE_M,
                                               HCLGE_CFG_RSS_SIZE_S);

        for (i = 0; i < ETH_ALEN; i++)
                cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

        req = (struct hclge_cfg_param_cmd *)desc[1].data;
        cfg->numa_node_map = __le32_to_cpu(req->param[0]);

        cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                             HCLGE_CFG_SPEED_ABILITY_M,
                                             HCLGE_CFG_SPEED_ABILITY_S);
        speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                            HCLGE_CFG_SPEED_ABILITY_EXT_M,
                                            HCLGE_CFG_SPEED_ABILITY_EXT_S);
        cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

        cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
                                         HCLGE_CFG_UMV_TBL_SPACE_M,
                                         HCLGE_CFG_UMV_TBL_SPACE_S);
        if (!cfg->umv_space)
                cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

        cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
                                               HCLGE_CFG_PF_RSS_SIZE_M,
                                               HCLGE_CFG_PF_RSS_SIZE_S);

1316         /* HCLGE_CFG_PF_RSS_SIZE_M holds log2 of the PF max rss size (a
1317          * power of 2) instead of the size itself, which is more
1318          * flexible for future changes and expansions. A zero value
1319          * makes no sense as a PF maximum, so in that case the PF falls
1320          * back to the VF max rss size, i.e. PF and VF then share the
1321          * same max rss size field: HCLGE_CFG_RSS_SIZE_S.
1322          */
1323         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1324                                1U << cfg->pf_rss_size_max :
1325                                cfg->vf_rss_size_max;
1326 }
1327
1328 /* hclge_get_cfg: query the static parameters from flash
1329  * @hdev: pointer to struct hclge_dev
1330  * @hcfg: the config structure to be filled
1331  */
1332 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1333 {
1334         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1335         struct hclge_cfg_param_cmd *req;
1336         unsigned int i;
1337         int ret;
1338
1339         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1340                 u32 offset = 0;
1341
1342                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1343                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1344                                            true);
1345                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1346                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1347                 /* Length should be in units of 4 bytes when sent to hardware */
1348                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1349                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1350                 req->offset = cpu_to_le32(offset);
1351         }
1352
1353         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1354         if (ret) {
1355                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1356                 return ret;
1357         }
1358
1359         hclge_parse_cfg(hcfg, desc);
1360
1361         return 0;
1362 }
1363
1364 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1365 {
1366 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1367
1368         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1369
1370         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1371         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1372         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1373         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1374         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1375         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1376         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1377 }
1378
1379 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1380                                   struct hclge_desc *desc)
1381 {
1382         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1383         struct hclge_dev_specs_0_cmd *req0;
1384         struct hclge_dev_specs_1_cmd *req1;
1385
1386         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1387         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1388
1389         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1390         ae_dev->dev_specs.rss_ind_tbl_size =
1391                 le16_to_cpu(req0->rss_ind_tbl_size);
1392         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1393         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1394         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1395         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1396         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1397         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1398 }
1399
1400 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1401 {
1402         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1403
1404         if (!dev_specs->max_non_tso_bd_num)
1405                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406         if (!dev_specs->rss_ind_tbl_size)
1407                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1408         if (!dev_specs->rss_key_size)
1409                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1410         if (!dev_specs->max_tm_rate)
1411                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1412         if (!dev_specs->max_qset_num)
1413                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1414         if (!dev_specs->max_int_gl)
1415                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1416         if (!dev_specs->max_frm_size)
1417                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1418 }
1419
1420 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1421 {
1422         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1423         int ret;
1424         int i;
1425
1426         /* set default specifications as devices lower than version V3 do not
1427          * support querying specifications from firmware.
1428          */
1429         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1430                 hclge_set_default_dev_specs(hdev);
1431                 return 0;
1432         }
1433
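             /* The query spans HCLGE_QUERY_DEV_SPECS_BD_NUM descriptors;
              * every descriptor but the last sets HCLGE_CMD_FLAG_NEXT so
              * the firmware handles them as one chained command.
              */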
1434         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1435                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1436                                            true);
1437                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1438         }
1439         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1440
1441         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1442         if (ret)
1443                 return ret;
1444
1445         hclge_parse_dev_specs(hdev, desc);
1446         hclge_check_dev_specs(hdev);
1447
1448         return 0;
1449 }
1450
1451 static int hclge_get_cap(struct hclge_dev *hdev)
1452 {
1453         int ret;
1454
1455         ret = hclge_query_function_status(hdev);
1456         if (ret) {
1457                 dev_err(&hdev->pdev->dev,
1458                         "query function status error %d.\n", ret);
1459                 return ret;
1460         }
1461
1462         /* get pf resource */
1463         return hclge_query_pf_resource(hdev);
1464 }
1465
1466 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1467 {
1468 #define HCLGE_MIN_TX_DESC       64
1469 #define HCLGE_MIN_RX_DESC       64
1470
1471         if (!is_kdump_kernel())
1472                 return;
1473
1474         dev_info(&hdev->pdev->dev,
1475                  "Running kdump kernel. Using minimal resources\n");
1476
1477         /* the minimal number of queue pairs equals the number of vports */
1478         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1479         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1480         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1481 }
1482
1483 static int hclge_configure(struct hclge_dev *hdev)
1484 {
1485         struct hclge_cfg cfg;
1486         unsigned int i;
1487         int ret;
1488
1489         ret = hclge_get_cfg(hdev, &cfg);
1490         if (ret)
1491                 return ret;
1492
1493         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1494         hdev->base_tqp_pid = 0;
1495         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1496         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1497         hdev->rx_buf_len = cfg.rx_buf_len;
1498         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1499         hdev->hw.mac.media_type = cfg.media_type;
1500         hdev->hw.mac.phy_addr = cfg.phy_addr;
1501         hdev->num_tx_desc = cfg.tqp_desc_num;
1502         hdev->num_rx_desc = cfg.tqp_desc_num;
1503         hdev->tm_info.num_pg = 1;
1504         hdev->tc_max = cfg.tc_num;
1505         hdev->tm_info.hw_pfc_map = 0;
1506         hdev->wanted_umv_size = cfg.umv_space;
1507
1508         if (hnae3_dev_fd_supported(hdev)) {
1509                 hdev->fd_en = true;
1510                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1511         }
1512
1513         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1514         if (ret) {
1515                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1516                         cfg.default_speed, ret);
1517                 return ret;
1518         }
1519
1520         hclge_parse_link_mode(hdev, cfg.speed_ability);
1521
1522         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1523
1524         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1525             (hdev->tc_max < 1)) {
1526                 dev_warn(&hdev->pdev->dev, "invalid TC num %u, set to 1.\n",
1527                          hdev->tc_max);
1528                 hdev->tc_max = 1;
1529         }
1530
1531         /* Dev does not support DCB */
1532         if (!hnae3_dev_dcb_supported(hdev)) {
1533                 hdev->tc_max = 1;
1534                 hdev->pfc_max = 0;
1535         } else {
1536                 hdev->pfc_max = hdev->tc_max;
1537         }
1538
1539         hdev->tm_info.num_tc = 1;
1540
1541         /* Discontinuous TCs are currently not supported */
1542         for (i = 0; i < hdev->tm_info.num_tc; i++)
1543                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1544
1545         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1546
1547         hclge_init_kdump_kernel_config(hdev);
1548
1549         /* Set the initial affinity based on the PCI function number */
1550         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1551         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1552         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1553                         &hdev->affinity_mask);
1554
1555         return ret;
1556 }
1557
1558 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1559                             u16 tso_mss_max)
1560 {
1561         struct hclge_cfg_tso_status_cmd *req;
1562         struct hclge_desc desc;
1563
1564         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1565
1566         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1567         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1568         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1569
1570         return hclge_cmd_send(&hdev->hw, &desc, 1);
1571 }
1572
1573 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1574 {
1575         struct hclge_cfg_gro_status_cmd *req;
1576         struct hclge_desc desc;
1577         int ret;
1578
1579         if (!hnae3_dev_gro_supported(hdev))
1580                 return 0;
1581
1582         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1583         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1584
1585         req->gro_en = en ? 1 : 0;
1586
1587         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1588         if (ret)
1589                 dev_err(&hdev->pdev->dev,
1590                         "GRO hardware config cmd failed, ret = %d\n", ret);
1591
1592         return ret;
1593 }
1594
1595 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1596 {
1597         struct hclge_tqp *tqp;
1598         int i;
1599
1600         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1601                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1602         if (!hdev->htqp)
1603                 return -ENOMEM;
1604
1605         tqp = hdev->htqp;
1606
1607         for (i = 0; i < hdev->num_tqps; i++) {
1608                 tqp->dev = &hdev->pdev->dev;
1609                 tqp->index = i;
1610
1611                 tqp->q.ae_algo = &ae_algo;
1612                 tqp->q.buf_size = hdev->rx_buf_len;
1613                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1614                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1615
1616                 /* need an extended offset to configure queues >=
1617                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1618                  */
1619                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1620                         tqp->q.io_base = hdev->hw.io_base +
1621                                          HCLGE_TQP_REG_OFFSET +
1622                                          i * HCLGE_TQP_REG_SIZE;
1623                 else
1624                         tqp->q.io_base = hdev->hw.io_base +
1625                                          HCLGE_TQP_REG_OFFSET +
1626                                          HCLGE_TQP_EXT_REG_OFFSET +
1627                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1628                                          HCLGE_TQP_REG_SIZE;
1629
1630                 tqp++;
1631         }
1632
1633         return 0;
1634 }
1635
1636 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1637                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1638 {
1639         struct hclge_tqp_map_cmd *req;
1640         struct hclge_desc desc;
1641         int ret;
1642
1643         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1644
1645         req = (struct hclge_tqp_map_cmd *)desc.data;
1646         req->tqp_id = cpu_to_le16(tqp_pid);
1647         req->tqp_vf = func_id;
1648         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1649         if (!is_pf)
1650                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1651         req->tqp_vid = cpu_to_le16(tqp_vid);
1652
1653         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1654         if (ret)
1655                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1656
1657         return ret;
1658 }
1659
1660 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1661 {
1662         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1663         struct hclge_dev *hdev = vport->back;
1664         int i, alloced;
1665
1666         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1667              alloced < num_tqps; i++) {
1668                 if (!hdev->htqp[i].alloced) {
1669                         hdev->htqp[i].q.handle = &vport->nic;
1670                         hdev->htqp[i].q.tqp_index = alloced;
1671                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1672                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1673                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1674                         hdev->htqp[i].alloced = true;
1675                         alloced++;
1676                 }
1677         }
1678         vport->alloc_tqps = alloced;
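             /* rss_size is per TC: cap the queues per TC by the PF limit,
              * e.g. 16 allocated TQPs over 4 TCs allow at most 4 RSS queues
              * in each TC.
              */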
1679         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1680                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1681
1682         /* ensure a one-to-one mapping between irq and queue by default */
1683         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1684                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1685
1686         return 0;
1687 }
1688
1689 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1690                             u16 num_tx_desc, u16 num_rx_desc)
1691
1692 {
1693         struct hnae3_handle *nic = &vport->nic;
1694         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1695         struct hclge_dev *hdev = vport->back;
1696         int ret;
1697
1698         kinfo->num_tx_desc = num_tx_desc;
1699         kinfo->num_rx_desc = num_rx_desc;
1700
1701         kinfo->rx_buf_len = hdev->rx_buf_len;
1702
1703         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1704                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1705         if (!kinfo->tqp)
1706                 return -ENOMEM;
1707
1708         ret = hclge_assign_tqp(vport, num_tqps);
1709         if (ret)
1710                 dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1711
1712         return ret;
1713 }
1714
1715 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1716                                   struct hclge_vport *vport)
1717 {
1718         struct hnae3_handle *nic = &vport->nic;
1719         struct hnae3_knic_private_info *kinfo;
1720         u16 i;
1721
1722         kinfo = &nic->kinfo;
1723         for (i = 0; i < vport->alloc_tqps; i++) {
1724                 struct hclge_tqp *q =
1725                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1726                 bool is_pf;
1727                 int ret;
1728
1729                 is_pf = !(vport->vport_id);
1730                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1731                                              i, is_pf);
1732                 if (ret)
1733                         return ret;
1734         }
1735
1736         return 0;
1737 }
1738
1739 static int hclge_map_tqp(struct hclge_dev *hdev)
1740 {
1741         struct hclge_vport *vport = hdev->vport;
1742         u16 i, num_vport;
1743
1744         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1745         for (i = 0; i < num_vport; i++) {
1746                 int ret;
1747
1748                 ret = hclge_map_tqp_to_vport(hdev, vport);
1749                 if (ret)
1750                         return ret;
1751
1752                 vport++;
1753         }
1754
1755         return 0;
1756 }
1757
1758 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1759 {
1760         struct hnae3_handle *nic = &vport->nic;
1761         struct hclge_dev *hdev = vport->back;
1762         int ret;
1763
1764         nic->pdev = hdev->pdev;
1765         nic->ae_algo = &ae_algo;
1766         nic->numa_node_mask = hdev->numa_node_mask;
1767
1768         ret = hclge_knic_setup(vport, num_tqps,
1769                                hdev->num_tx_desc, hdev->num_rx_desc);
1770         if (ret)
1771                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1772
1773         return ret;
1774 }
1775
1776 static int hclge_alloc_vport(struct hclge_dev *hdev)
1777 {
1778         struct pci_dev *pdev = hdev->pdev;
1779         struct hclge_vport *vport;
1780         u32 tqp_main_vport;
1781         u32 tqp_per_vport;
1782         int num_vport, i;
1783         int ret;
1784
1785         /* We need to alloc a vport for the main NIC of the PF */
1786         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1787
1788         if (hdev->num_tqps < num_vport) {
1789                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1790                         hdev->num_tqps, num_vport);
1791                 return -EINVAL;
1792         }
1793
1794         /* Alloc the same number of TQPs for every vport */
1795         tqp_per_vport = hdev->num_tqps / num_vport;
1796         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
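             /* e.g. 100 TQPs over 9 vports: every vport gets 11, and the
              * main vport also takes the remainder, 11 + 1 = 12.
              */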
1797
1798         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1799                              GFP_KERNEL);
1800         if (!vport)
1801                 return -ENOMEM;
1802
1803         hdev->vport = vport;
1804         hdev->num_alloc_vport = num_vport;
1805
1806         if (IS_ENABLED(CONFIG_PCI_IOV))
1807                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1808
1809         for (i = 0; i < num_vport; i++) {
1810                 vport->back = hdev;
1811                 vport->vport_id = i;
1812                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1813                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1814                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1815                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1816                 INIT_LIST_HEAD(&vport->vlan_list);
1817                 INIT_LIST_HEAD(&vport->uc_mac_list);
1818                 INIT_LIST_HEAD(&vport->mc_mac_list);
1819                 spin_lock_init(&vport->mac_list_lock);
1820
1821                 if (i == 0)
1822                         ret = hclge_vport_setup(vport, tqp_main_vport);
1823                 else
1824                         ret = hclge_vport_setup(vport, tqp_per_vport);
1825                 if (ret) {
1826                         dev_err(&pdev->dev,
1827                                 "vport setup failed for vport %d, %d\n",
1828                                 i, ret);
1829                         return ret;
1830                 }
1831
1832                 vport++;
1833         }
1834
1835         return 0;
1836 }
1837
1838 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1839                                     struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841 /* TX buffer size is in units of 128 bytes */
1842 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1843 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1844         struct hclge_tx_buff_alloc_cmd *req;
1845         struct hclge_desc desc;
1846         int ret;
1847         u8 i;
1848
1849         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1850
1851         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1852         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1853                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1854
1855                 req->tx_pkt_buff[i] =
1856                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1857                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1858         }
1859
1860         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1861         if (ret)
1862                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1863                         ret);
1864
1865         return ret;
1866 }
1867
1868 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1869                                  struct hclge_pkt_buf_alloc *buf_alloc)
1870 {
1871         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1872
1873         if (ret)
1874                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1875
1876         return ret;
1877 }
1878
1879 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1880 {
1881         unsigned int i;
1882         u32 cnt = 0;
1883
1884         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1885                 if (hdev->hw_tc_map & BIT(i))
1886                         cnt++;
1887         return cnt;
1888 }
1889
1890 /* Get the number of PFC-enabled TCs that have a private buffer */
1891 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1892                                   struct hclge_pkt_buf_alloc *buf_alloc)
1893 {
1894         struct hclge_priv_buf *priv;
1895         unsigned int i;
1896         int cnt = 0;
1897
1898         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899                 priv = &buf_alloc->priv_buf[i];
1900                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1901                     priv->enable)
1902                         cnt++;
1903         }
1904
1905         return cnt;
1906 }
1907
1908 /* Get the number of PFC-disabled TCs that have a private buffer */
1909 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1910                                      struct hclge_pkt_buf_alloc *buf_alloc)
1911 {
1912         struct hclge_priv_buf *priv;
1913         unsigned int i;
1914         int cnt = 0;
1915
1916         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1917                 priv = &buf_alloc->priv_buf[i];
1918                 if (hdev->hw_tc_map & BIT(i) &&
1919                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1920                     priv->enable)
1921                         cnt++;
1922         }
1923
1924         return cnt;
1925 }
1926
1927 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1928 {
1929         struct hclge_priv_buf *priv;
1930         u32 rx_priv = 0;
1931         int i;
1932
1933         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1934                 priv = &buf_alloc->priv_buf[i];
1935                 if (priv->enable)
1936                         rx_priv += priv->buf_size;
1937         }
1938         return rx_priv;
1939 }
1940
1941 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1942 {
1943         u32 i, total_tx_size = 0;
1944
1945         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1946                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1947
1948         return total_tx_size;
1949 }
1950
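     /* Check whether the rx buffer budget covers every TC's private buffer
      * plus a shared buffer, and if so derive the shared-buffer waterlines
      * and per-TC thresholds. Sizes are aligned to HCLGE_BUF_SIZE_UNIT,
      * e.g. an mps of 1500 rounds up to an aligned_mps of 1536.
      */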
1951 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1952                                 struct hclge_pkt_buf_alloc *buf_alloc,
1953                                 u32 rx_all)
1954 {
1955         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1956         u32 tc_num = hclge_get_tc_num(hdev);
1957         u32 shared_buf, aligned_mps;
1958         u32 rx_priv;
1959         int i;
1960
1961         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1962
1963         if (hnae3_dev_dcb_supported(hdev))
1964                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1965                                         hdev->dv_buf_size;
1966         else
1967                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1968                                         + hdev->dv_buf_size;
1969
1970         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1971         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1972                              HCLGE_BUF_SIZE_UNIT);
1973
1974         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1975         if (rx_all < rx_priv + shared_std)
1976                 return false;
1977
1978         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1979         buf_alloc->s_buf.buf_size = shared_buf;
1980         if (hnae3_dev_dcb_supported(hdev)) {
1981                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1982                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1983                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1984                                   HCLGE_BUF_SIZE_UNIT);
1985         } else {
1986                 buf_alloc->s_buf.self.high = aligned_mps +
1987                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1988                 buf_alloc->s_buf.self.low = aligned_mps;
1989         }
1990
1991         if (hnae3_dev_dcb_supported(hdev)) {
1992                 hi_thrd = shared_buf - hdev->dv_buf_size;
1993
1994                 if (tc_num <= NEED_RESERVE_TC_NUM)
1995                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1996                                         / BUF_MAX_PERCENT;
1997
1998                 if (tc_num)
1999                         hi_thrd = hi_thrd / tc_num;
2000
2001                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2002                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2003                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2004         } else {
2005                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2006                 lo_thrd = aligned_mps;
2007         }
2008
2009         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2010                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2011                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2012         }
2013
2014         return true;
2015 }
2016
2017 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2018                                 struct hclge_pkt_buf_alloc *buf_alloc)
2019 {
2020         u32 i, total_size;
2021
2022         total_size = hdev->pkt_buf_size;
2023
2024         /* alloc tx buffer for all enabled tc */
2025         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2026                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2027
2028                 if (hdev->hw_tc_map & BIT(i)) {
2029                         if (total_size < hdev->tx_buf_size)
2030                                 return -ENOMEM;
2031
2032                         priv->tx_buf_size = hdev->tx_buf_size;
2033                 } else {
2034                         priv->tx_buf_size = 0;
2035                 }
2036
2037                 total_size -= priv->tx_buf_size;
2038         }
2039
2040         return 0;
2041 }
2042
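     /* Fill in a private rx buffer for every enabled TC, using the larger
      * or smaller waterline scheme depending on @max, then verify the
      * result still fits via hclge_is_rx_buf_ok().
      */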
2043 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2044                                   struct hclge_pkt_buf_alloc *buf_alloc)
2045 {
2046         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2047         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2048         unsigned int i;
2049
2050         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2051                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2052
2053                 priv->enable = 0;
2054                 priv->wl.low = 0;
2055                 priv->wl.high = 0;
2056                 priv->buf_size = 0;
2057
2058                 if (!(hdev->hw_tc_map & BIT(i)))
2059                         continue;
2060
2061                 priv->enable = 1;
2062
2063                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2064                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2065                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2066                                                 HCLGE_BUF_SIZE_UNIT);
2067                 } else {
2068                         priv->wl.low = 0;
2069                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2070                                         aligned_mps;
2071                 }
2072
2073                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2074         }
2075
2076         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077 }
2078
2079 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2080                                           struct hclge_pkt_buf_alloc *buf_alloc)
2081 {
2082         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2084         int i;
2085
2086         /* clear TCs starting from the last one */
2087         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089                 unsigned int mask = BIT((unsigned int)i);
2090
2091                 if (hdev->hw_tc_map & mask &&
2092                     !(hdev->tm_info.hw_pfc_map & mask)) {
2093                         /* Clear the private buffer of a TC without PFC */
2094                         priv->wl.low = 0;
2095                         priv->wl.high = 0;
2096                         priv->buf_size = 0;
2097                         priv->enable = 0;
2098                         no_pfc_priv_num--;
2099                 }
2100
2101                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102                     no_pfc_priv_num == 0)
2103                         break;
2104         }
2105
2106         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108
2109 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2110                                         struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2114         int i;
2115
2116         /* clear TCs starting from the last one */
2117         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119                 unsigned int mask = BIT((unsigned int)i);
2120
2121                 if (hdev->hw_tc_map & mask &&
2122                     hdev->tm_info.hw_pfc_map & mask) {
2123                         /* Reduce the number of PFC TCs with a private buffer */
2124                         priv->wl.low = 0;
2125                         priv->enable = 0;
2126                         priv->wl.high = 0;
2127                         priv->buf_size = 0;
2128                         pfc_priv_num--;
2129                 }
2130
2131                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132                     pfc_priv_num == 0)
2133                         break;
2134         }
2135
2136         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138
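     /* Split the whole remaining rx budget evenly into per-TC private
      * buffers with no shared buffer; this only succeeds when each TC's
      * share stays above the computed minimum.
      */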
2139 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2140                                        struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142 #define COMPENSATE_BUFFER       0x3C00
2143 #define COMPENSATE_HALF_MPS_NUM 5
2144 #define PRIV_WL_GAP             0x1800
2145
2146         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2147         u32 tc_num = hclge_get_tc_num(hdev);
2148         u32 half_mps = hdev->mps >> 1;
2149         u32 min_rx_priv;
2150         unsigned int i;
2151
2152         if (tc_num)
2153                 rx_priv = rx_priv / tc_num;
2154
2155         if (tc_num <= NEED_RESERVE_TC_NUM)
2156                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2157
2158         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2159                         COMPENSATE_HALF_MPS_NUM * half_mps;
2160         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2161         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2162
2163         if (rx_priv < min_rx_priv)
2164                 return false;
2165
2166         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2167                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2168
2169                 priv->enable = 0;
2170                 priv->wl.low = 0;
2171                 priv->wl.high = 0;
2172                 priv->buf_size = 0;
2173
2174                 if (!(hdev->hw_tc_map & BIT(i)))
2175                         continue;
2176
2177                 priv->enable = 1;
2178                 priv->buf_size = rx_priv;
2179                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2180                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2181         }
2182
2183         buf_alloc->s_buf.buf_size = 0;
2184
2185         return true;
2186 }
2187
2188 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2189  * @hdev: pointer to struct hclge_dev
2190  * @buf_alloc: pointer to buffer calculation data
2191  * @return: 0: calculation successful, negative: fail
2192  */
2193 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2194                                 struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196         /* When DCB is not supported, rx private buffer is not allocated. */
2197         if (!hnae3_dev_dcb_supported(hdev)) {
2198                 u32 rx_all = hdev->pkt_buf_size;
2199
2200                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2201                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2202                         return -ENOMEM;
2203
2204                 return 0;
2205         }
2206
2207         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2208                 return 0;
2209
2210         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2211                 return 0;
2212
2213         /* try to decrease the buffer size */
2214         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2215                 return 0;
2216
2217         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2218                 return 0;
2219
2220         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2221                 return 0;
2222
2223         return -ENOMEM;
2224 }
2225
2226 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2227                                    struct hclge_pkt_buf_alloc *buf_alloc)
2228 {
2229         struct hclge_rx_priv_buff_cmd *req;
2230         struct hclge_desc desc;
2231         int ret;
2232         int i;
2233
2234         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2235         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2236
2237         /* Allocate the private buffer for each TC */
2238         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2239                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2240
2241                 req->buf_num[i] =
2242                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2243                 req->buf_num[i] |=
2244                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2245         }
2246
2247         req->shared_buf =
2248                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2249                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2250
2251         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2252         if (ret)
2253                 dev_err(&hdev->pdev->dev,
2254                         "rx private buffer alloc cmd failed %d\n", ret);
2255
2256         return ret;
2257 }
2258
2259 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2260                                    struct hclge_pkt_buf_alloc *buf_alloc)
2261 {
2262         struct hclge_rx_priv_wl_buf *req;
2263         struct hclge_priv_buf *priv;
2264         struct hclge_desc desc[2];
2265         int i, j;
2266         int ret;
2267
2268         for (i = 0; i < 2; i++) {
2269                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2270                                            false);
2271                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2272
2273                 /* The first descriptor sets the NEXT bit to 1 */
2274                 if (i == 0)
2275                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2276                 else
2277                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2278
2279                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2280                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2281
2282                         priv = &buf_alloc->priv_buf[idx];
2283                         req->tc_wl[j].high =
2284                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2285                         req->tc_wl[j].high |=
2286                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2287                         req->tc_wl[j].low =
2288                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2289                         req->tc_wl[j].low |=
2290                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2291                 }
2292         }
2293
2294         /* Send 2 descriptors at one time */
2295         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2296         if (ret)
2297                 dev_err(&hdev->pdev->dev,
2298                         "rx private waterline config cmd failed %d\n",
2299                         ret);
2300         return ret;
2301 }
2302
2303 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2304                                     struct hclge_pkt_buf_alloc *buf_alloc)
2305 {
2306         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2307         struct hclge_rx_com_thrd *req;
2308         struct hclge_desc desc[2];
2309         struct hclge_tc_thrd *tc;
2310         int i, j;
2311         int ret;
2312
2313         for (i = 0; i < 2; i++) {
2314                 hclge_cmd_setup_basic_desc(&desc[i],
2315                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2316                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2317
2318                 /* The first descriptor sets the NEXT bit to 1 */
2319                 if (i == 0)
2320                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2321                 else
2322                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2323
2324                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2325                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2326
2327                         req->com_thrd[j].high =
2328                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2329                         req->com_thrd[j].high |=
2330                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331                         req->com_thrd[j].low =
2332                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2333                         req->com_thrd[j].low |=
2334                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2335                 }
2336         }
2337
2338         /* Send 2 descriptors at one time */
2339         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2340         if (ret)
2341                 dev_err(&hdev->pdev->dev,
2342                         "common threshold config cmd failed %d\n", ret);
2343         return ret;
2344 }
2345
2346 static int hclge_common_wl_config(struct hclge_dev *hdev,
2347                                   struct hclge_pkt_buf_alloc *buf_alloc)
2348 {
2349         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2350         struct hclge_rx_com_wl *req;
2351         struct hclge_desc desc;
2352         int ret;
2353
2354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2355
2356         req = (struct hclge_rx_com_wl *)desc.data;
2357         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2358         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2359
2360         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2361         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2362
2363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2364         if (ret)
2365                 dev_err(&hdev->pdev->dev,
2366                         "common waterline config cmd failed %d\n", ret);
2367
2368         return ret;
2369 }
2370
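     /* Allocate tx/rx packet buffers and program the waterlines. Order
      * matters: tx sizes are fixed first, rx private buffers are computed
      * from what remains, and the private waterline/threshold registers
      * are only written on DCB-capable devices. Called e.g. from
      * hclge_mac_init() below.
      */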
2371 int hclge_buffer_alloc(struct hclge_dev *hdev)
2372 {
2373         struct hclge_pkt_buf_alloc *pkt_buf;
2374         int ret;
2375
2376         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2377         if (!pkt_buf)
2378                 return -ENOMEM;
2379
2380         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2381         if (ret) {
2382                 dev_err(&hdev->pdev->dev,
2383                         "could not calc tx buffer size for all TCs %d\n", ret);
2384                 goto out;
2385         }
2386
2387         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2388         if (ret) {
2389                 dev_err(&hdev->pdev->dev,
2390                         "could not alloc tx buffers %d\n", ret);
2391                 goto out;
2392         }
2393
2394         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2395         if (ret) {
2396                 dev_err(&hdev->pdev->dev,
2397                         "could not calc rx priv buffer size for all TCs %d\n",
2398                         ret);
2399                 goto out;
2400         }
2401
2402         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2403         if (ret) {
2404                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2405                         ret);
2406                 goto out;
2407         }
2408
2409         if (hnae3_dev_dcb_supported(hdev)) {
2410                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2411                 if (ret) {
2412                         dev_err(&hdev->pdev->dev,
2413                                 "could not configure rx private waterline %d\n",
2414                                 ret);
2415                         goto out;
2416                 }
2417
2418                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2419                 if (ret) {
2420                         dev_err(&hdev->pdev->dev,
2421                                 "could not configure common threshold %d\n",
2422                                 ret);
2423                         goto out;
2424                 }
2425         }
2426
2427         ret = hclge_common_wl_config(hdev, pkt_buf);
2428         if (ret)
2429                 dev_err(&hdev->pdev->dev,
2430                         "could not configure common waterline %d\n", ret);
2431
2432 out:
2433         kfree(pkt_buf);
2434         return ret;
2435 }
2436
2437 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2438 {
2439         struct hnae3_handle *roce = &vport->roce;
2440         struct hnae3_handle *nic = &vport->nic;
2441         struct hclge_dev *hdev = vport->back;
2442
2443         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2444
2445         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2446                 return -EINVAL;
2447
2448         roce->rinfo.base_vector = hdev->roce_base_vector;
2449
2450         roce->rinfo.netdev = nic->kinfo.netdev;
2451         roce->rinfo.roce_io_base = hdev->hw.io_base;
2452         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2453
2454         roce->pdev = nic->pdev;
2455         roce->ae_algo = nic->ae_algo;
2456         roce->numa_node_mask = nic->numa_node_mask;
2457
2458         return 0;
2459 }
2460
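     /* Allocate MSI/MSI-X vectors; getting fewer than requested is
      * tolerated, and the RoCE vectors start right after the NIC ones.
      */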
2461 static int hclge_init_msi(struct hclge_dev *hdev)
2462 {
2463         struct pci_dev *pdev = hdev->pdev;
2464         int vectors;
2465         int i;
2466
2467         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2468                                         hdev->num_msi,
2469                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2470         if (vectors < 0) {
2471                 dev_err(&pdev->dev,
2472                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2473                         vectors);
2474                 return vectors;
2475         }
2476         if (vectors < hdev->num_msi)
2477                 dev_warn(&hdev->pdev->dev,
2478                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2479                          hdev->num_msi, vectors);
2480
2481         hdev->num_msi = vectors;
2482         hdev->num_msi_left = vectors;
2483
2484         hdev->base_msi_vector = pdev->irq;
2485         hdev->roce_base_vector = hdev->base_msi_vector +
2486                                 hdev->num_nic_msi;
2487
2488         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2489                                            sizeof(u16), GFP_KERNEL);
2490         if (!hdev->vector_status) {
2491                 pci_free_irq_vectors(pdev);
2492                 return -ENOMEM;
2493         }
2494
2495         for (i = 0; i < hdev->num_msi; i++)
2496                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2497
2498         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2499                                         sizeof(int), GFP_KERNEL);
2500         if (!hdev->vector_irq) {
2501                 pci_free_irq_vectors(pdev);
2502                 return -ENOMEM;
2503         }
2504
2505         return 0;
2506 }
2507
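     /* Half duplex is only meaningful at 10M/100M; force full duplex for
      * all other speeds.
      */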
2508 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2509 {
2510         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2511                 duplex = HCLGE_MAC_FULL;
2512
2513         return duplex;
2514 }
2515
2516 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2517                                       u8 duplex)
2518 {
2519         struct hclge_config_mac_speed_dup_cmd *req;
2520         struct hclge_desc desc;
2521         int ret;
2522
2523         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2524
2525         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2526
2527         if (duplex)
2528                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2529
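             /* Firmware speed encoding, as programmed below:
              * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G,
              * 6 = 10M, 7 = 100M, 8 = 200G
              */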
2530         switch (speed) {
2531         case HCLGE_MAC_SPEED_10M:
2532                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533                                 HCLGE_CFG_SPEED_S, 6);
2534                 break;
2535         case HCLGE_MAC_SPEED_100M:
2536                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537                                 HCLGE_CFG_SPEED_S, 7);
2538                 break;
2539         case HCLGE_MAC_SPEED_1G:
2540                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541                                 HCLGE_CFG_SPEED_S, 0);
2542                 break;
2543         case HCLGE_MAC_SPEED_10G:
2544                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545                                 HCLGE_CFG_SPEED_S, 1);
2546                 break;
2547         case HCLGE_MAC_SPEED_25G:
2548                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549                                 HCLGE_CFG_SPEED_S, 2);
2550                 break;
2551         case HCLGE_MAC_SPEED_40G:
2552                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553                                 HCLGE_CFG_SPEED_S, 3);
2554                 break;
2555         case HCLGE_MAC_SPEED_50G:
2556                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557                                 HCLGE_CFG_SPEED_S, 4);
2558                 break;
2559         case HCLGE_MAC_SPEED_100G:
2560                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561                                 HCLGE_CFG_SPEED_S, 5);
2562                 break;
2563         case HCLGE_MAC_SPEED_200G:
2564                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565                                 HCLGE_CFG_SPEED_S, 8);
2566                 break;
2567         default:
2568                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2569                 return -EINVAL;
2570         }
2571
2572         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2573                       1);
2574
2575         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576         if (ret) {
2577                 dev_err(&hdev->pdev->dev,
2578                         "mac speed/duplex config cmd failed %d.\n", ret);
2579                 return ret;
2580         }
2581
2582         return 0;
2583 }
2584
2585 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2586 {
2587         struct hclge_mac *mac = &hdev->hw.mac;
2588         int ret;
2589
2590         duplex = hclge_check_speed_dup(duplex, speed);
2591         if (!mac->support_autoneg && mac->speed == speed &&
2592             mac->duplex == duplex)
2593                 return 0;
2594
2595         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2596         if (ret)
2597                 return ret;
2598
2599         hdev->hw.mac.speed = speed;
2600         hdev->hw.mac.duplex = duplex;
2601
2602         return 0;
2603 }
2604
2605 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2606                                      u8 duplex)
2607 {
2608         struct hclge_vport *vport = hclge_get_vport(handle);
2609         struct hclge_dev *hdev = vport->back;
2610
2611         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2612 }
2613
2614 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2615 {
2616         struct hclge_config_auto_neg_cmd *req;
2617         struct hclge_desc desc;
2618         u32 flag = 0;
2619         int ret;
2620
2621         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2622
2623         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2624         if (enable)
2625                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2626         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2627
2628         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629         if (ret)
2630                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2631                         ret);
2632
2633         return ret;
2634 }
2635
2636 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2637 {
2638         struct hclge_vport *vport = hclge_get_vport(handle);
2639         struct hclge_dev *hdev = vport->back;
2640
2641         if (!hdev->hw.mac.support_autoneg) {
2642                 if (enable) {
2643                         dev_err(&hdev->pdev->dev,
2644                                 "autoneg is not supported by current port\n");
2645                         return -EOPNOTSUPP;
2646                 } else {
2647                         return 0;
2648                 }
2649         }
2650
2651         return hclge_set_autoneg_en(hdev, enable);
2652 }
2653
2654 static int hclge_get_autoneg(struct hnae3_handle *handle)
2655 {
2656         struct hclge_vport *vport = hclge_get_vport(handle);
2657         struct hclge_dev *hdev = vport->back;
2658         struct phy_device *phydev = hdev->hw.mac.phydev;
2659
2660         if (phydev)
2661                 return phydev->autoneg;
2662
2663         return hdev->hw.mac.autoneg;
2664 }
2665
2666 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2667 {
2668         struct hclge_vport *vport = hclge_get_vport(handle);
2669         struct hclge_dev *hdev = vport->back;
2670         int ret;
2671
2672         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2673
2674         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2675         if (ret)
2676                 return ret;
2677         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2678 }
2679
2680 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2681 {
2682         struct hclge_vport *vport = hclge_get_vport(handle);
2683         struct hclge_dev *hdev = vport->back;
2684
2685         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2686                 return hclge_set_autoneg_en(hdev, !halt);
2687
2688         return 0;
2689 }
2690
2691 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2692 {
2693         struct hclge_config_fec_cmd *req;
2694         struct hclge_desc desc;
2695         int ret;
2696
2697         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2698
2699         req = (struct hclge_config_fec_cmd *)desc.data;
2700         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2701                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2702         if (fec_mode & BIT(HNAE3_FEC_RS))
2703                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2704                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2705         if (fec_mode & BIT(HNAE3_FEC_BASER))
2706                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2707                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2708
2709         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2710         if (ret)
2711                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2712
2713         return ret;
2714 }
2715
2716 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2717 {
2718         struct hclge_vport *vport = hclge_get_vport(handle);
2719         struct hclge_dev *hdev = vport->back;
2720         struct hclge_mac *mac = &hdev->hw.mac;
2721         int ret;
2722
2723         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2724                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2725                 return -EINVAL;
2726         }
2727
2728         ret = hclge_set_fec_hw(hdev, fec_mode);
2729         if (ret)
2730                 return ret;
2731
2732         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2733         return 0;
2734 }
2735
2736 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2737                           u8 *fec_mode)
2738 {
2739         struct hclge_vport *vport = hclge_get_vport(handle);
2740         struct hclge_dev *hdev = vport->back;
2741         struct hclge_mac *mac = &hdev->hw.mac;
2742
2743         if (fec_ability)
2744                 *fec_ability = mac->fec_ability;
2745         if (fec_mode)
2746                 *fec_mode = mac->fec_mode;
2747 }
2748
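/* Note: the bring-up order below appears deliberate: speed/duplex and
 * autoneg are programmed first, a user-requested FEC mode (if any) is
 * restored next, and the packet buffer is only allocated after the MTU
 * and default loopback have been configured.
 */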
2749 static int hclge_mac_init(struct hclge_dev *hdev)
2750 {
2751         struct hclge_mac *mac = &hdev->hw.mac;
2752         int ret;
2753
2754         hdev->support_sfp_query = true;
2755         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2756         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2757                                          hdev->hw.mac.duplex);
2758         if (ret)
2759                 return ret;
2760
2761         if (hdev->hw.mac.support_autoneg) {
2762                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2763                 if (ret)
2764                         return ret;
2765         }
2766
2767         mac->link = 0;
2768
2769         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2770                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2771                 if (ret)
2772                         return ret;
2773         }
2774
2775         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2776         if (ret) {
2777                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2778                 return ret;
2779         }
2780
2781         ret = hclge_set_default_loopback(hdev);
2782         if (ret)
2783                 return ret;
2784
2785         ret = hclge_buffer_alloc(hdev);
2786         if (ret)
2787                 dev_err(&hdev->pdev->dev,
2788                         "allocate buffer fail, ret=%d\n", ret);
2789
2790         return ret;
2791 }
2792
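/* The three schedule helpers below all funnel into the same delayed work
 * (hdev->service_task) on the dedicated hclge_wq workqueue, pinned to the
 * first CPU of hdev->affinity_mask. The HCLGE_STATE_*_SERVICE_SCHED bits
 * ensure the mailbox and reset subtasks are not queued twice.
 */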
2793 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2794 {
2795         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2796             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2797                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2798                                     hclge_wq, &hdev->service_task, 0);
2799 }
2800
2801 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2802 {
2803         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2805                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806                                     hclge_wq, &hdev->service_task, 0);
2807 }
2808
2809 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2810 {
2811         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2813                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2814                                     hclge_wq, &hdev->service_task,
2815                                     delay_time);
2816 }
2817
2818 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2819 {
2820         struct hclge_link_status_cmd *req;
2821         struct hclge_desc desc;
2822         int ret;
2823
2824         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2825         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2826         if (ret) {
2827                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2828                         ret);
2829                 return ret;
2830         }
2831
2832         req = (struct hclge_link_status_cmd *)desc.data;
2833         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2834                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2835
2836         return 0;
2837 }
2838
2839 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2840 {
2841         struct phy_device *phydev = hdev->hw.mac.phydev;
2842
2843         *link_status = HCLGE_LINK_STATUS_DOWN;
2844
2845         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2846                 return 0;
2847
2848         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2849                 return 0;
2850
2851         return hclge_get_mac_link_status(hdev, link_status);
2852 }
2853
2854 static void hclge_update_link_status(struct hclge_dev *hdev)
2855 {
2856         struct hnae3_client *rclient = hdev->roce_client;
2857         struct hnae3_client *client = hdev->nic_client;
2858         struct hnae3_handle *rhandle;
2859         struct hnae3_handle *handle;
2860         int state;
2861         int ret;
2862         int i;
2863
2864         if (!client)
2865                 return;
2866
2867         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2868                 return;
2869
2870         ret = hclge_get_mac_phy_link(hdev, &state);
2871         if (ret) {
2872                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2873                 return;
2874         }
2875
2876         if (state != hdev->hw.mac.link) {
2877                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2878                         handle = &hdev->vport[i].nic;
2879                         client->ops->link_status_change(handle, state);
2880                         hclge_config_mac_tnl_int(hdev, state);
2881                         rhandle = &hdev->vport[i].roce;
2882                         if (rclient && rclient->ops->link_status_change)
2883                                 rclient->ops->link_status_change(rhandle,
2884                                                                  state);
2885                 }
2886                 hdev->hw.mac.link = state;
2887         }
2888
2889         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2890 }
2891
2892 static void hclge_update_port_capability(struct hclge_mac *mac)
2893 {
2894         /* update fec ability by speed */
2895         hclge_convert_setting_fec(mac);
2896
2897         /* firmware cannot identify the backplane type; the media type
2898          * read from the configuration can help to deal with it
2899          */
2900         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2901             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2902                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2903         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2904                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2905
2906         if (mac->support_autoneg) {
2907                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2908                 linkmode_copy(mac->advertising, mac->supported);
2909         } else {
2910                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2911                                    mac->supported);
2912                 linkmode_zero(mac->advertising);
2913         }
2914 }
2915
2916 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2917 {
2918         struct hclge_sfp_info_cmd *resp;
2919         struct hclge_desc desc;
2920         int ret;
2921
2922         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2923         resp = (struct hclge_sfp_info_cmd *)desc.data;
2924         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2925         if (ret == -EOPNOTSUPP) {
2926                 dev_warn(&hdev->pdev->dev,
2927                          "IMP does not support getting SFP speed %d\n", ret);
2928                 return ret;
2929         } else if (ret) {
2930                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2931                 return ret;
2932         }
2933
2934         *speed = le32_to_cpu(resp->speed);
2935
2936         return 0;
2937 }
2938
2939 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2940 {
2941         struct hclge_sfp_info_cmd *resp;
2942         struct hclge_desc desc;
2943         int ret;
2944
2945         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2946         resp = (struct hclge_sfp_info_cmd *)desc.data;
2947
2948         resp->query_type = QUERY_ACTIVE_SPEED;
2949
2950         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2951         if (ret == -EOPNOTSUPP) {
2952                 dev_warn(&hdev->pdev->dev,
2953                          "IMP does not support getting SFP info %d\n", ret);
2954                 return ret;
2955         } else if (ret) {
2956                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2957                 return ret;
2958         }
2959
2960         /* In some cases, the MAC speed got from the IMP may be 0; it
2961          * should not be set to mac->speed.
2962          */
2963         if (!le32_to_cpu(resp->speed))
2964                 return 0;
2965
2966         mac->speed = le32_to_cpu(resp->speed);
2967         /* if resp->speed_ability is 0, it means the firmware is an old
2968          * version, so do not update these params
2969          */
2970         if (resp->speed_ability) {
2971                 mac->module_type = le32_to_cpu(resp->module_type);
2972                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2973                 mac->autoneg = resp->autoneg;
2974                 mac->support_autoneg = resp->autoneg_ability;
2975                 mac->speed_type = QUERY_ACTIVE_SPEED;
2976                 if (!resp->active_fec)
2977                         mac->fec_mode = 0;
2978                 else
2979                         mac->fec_mode = BIT(resp->active_fec);
2980         } else {
2981                 mac->speed_type = QUERY_SFP_SPEED;
2982         }
2983
2984         return 0;
2985 }
2986
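/* Port info refresh strategy, as implemented below: copper ports and
 * firmware without SFP query support are skipped entirely; V2 and newer
 * devices query the full SFP info (speed, autoneg, FEC), while older
 * devices can only query the SFP speed and must assume full duplex.
 */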
2987 static int hclge_update_port_info(struct hclge_dev *hdev)
2988 {
2989         struct hclge_mac *mac = &hdev->hw.mac;
2990         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2991         int ret;
2992
2993         /* get the port info from SFP cmd if not copper port */
2994         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2995                 return 0;
2996
2997         /* if IMP does not support get SFP/qSFP info, return directly */
2998         if (!hdev->support_sfp_query)
2999                 return 0;
3000
3001         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3002                 ret = hclge_get_sfp_info(hdev, mac);
3003         else
3004                 ret = hclge_get_sfp_speed(hdev, &speed);
3005
3006         if (ret == -EOPNOTSUPP) {
3007                 hdev->support_sfp_query = false;
3008                 return ret;
3009         } else if (ret) {
3010                 return ret;
3011         }
3012
3013         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3014                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3015                         hclge_update_port_capability(mac);
3016                         return 0;
3017                 }
3018                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3019                                                HCLGE_MAC_FULL);
3020         } else {
3021                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3022                         return 0; /* do nothing if no SFP */
3023
3024                 /* must config full duplex for SFP */
3025                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3026         }
3027 }
3028
3029 static int hclge_get_status(struct hnae3_handle *handle)
3030 {
3031         struct hclge_vport *vport = hclge_get_vport(handle);
3032         struct hclge_dev *hdev = vport->back;
3033
3034         hclge_update_link_status(hdev);
3035
3036         return hdev->hw.mac.link;
3037 }
3038
3039 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3040 {
3041         if (!pci_num_vf(hdev->pdev)) {
3042                 dev_err(&hdev->pdev->dev,
3043                         "SRIOV is disabled, cannot get vport(%d) info.\n", vf);
3044                 return NULL;
3045         }
3046
3047         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3048                 dev_err(&hdev->pdev->dev,
3049                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3050                         vf, pci_num_vf(hdev->pdev));
3051                 return NULL;
3052         }
3053
3054         /* VFs start from 1 in vport */
3055         vf += HCLGE_VF_VPORT_START_NUM;
3056         return &hdev->vport[vf];
3057 }
3058
3059 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3060                                struct ifla_vf_info *ivf)
3061 {
3062         struct hclge_vport *vport = hclge_get_vport(handle);
3063         struct hclge_dev *hdev = vport->back;
3064
3065         vport = hclge_get_vf_vport(hdev, vf);
3066         if (!vport)
3067                 return -EINVAL;
3068
3069         ivf->vf = vf;
3070         ivf->linkstate = vport->vf_info.link_state;
3071         ivf->spoofchk = vport->vf_info.spoofchk;
3072         ivf->trusted = vport->vf_info.trusted;
3073         ivf->min_tx_rate = 0;
3074         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3075         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3076         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3077         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3078         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3079
3080         return 0;
3081 }
3082
3083 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3084                                    int link_state)
3085 {
3086         struct hclge_vport *vport = hclge_get_vport(handle);
3087         struct hclge_dev *hdev = vport->back;
3088
3089         vport = hclge_get_vf_vport(hdev, vf);
3090         if (!vport)
3091                 return -EINVAL;
3092
3093         vport->vf_info.link_state = link_state;
3094
3095         return 0;
3096 }
3097
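/* The sources sharing vector0 are checked in priority order below: IMP
 * reset, global reset, MSI-X/hardware error, mailbox (CMDQ RX), and
 * finally anything else.
 */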
3098 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3099 {
3100         u32 cmdq_src_reg, msix_src_reg;
3101
3102         /* fetch the events from their corresponding regs */
3103         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3104         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3105
3106         /* Assumption: if by any chance reset and mailbox events are reported
3107          * together, then we will only process the reset event in this pass
3108          * and defer the processing of the mailbox events. Since we have not
3109          * cleared the RX CMDQ event this time, we will receive another
3110          * interrupt from H/W just for the mailbox.
3111          *
3112          * check for vector0 reset event sources
3113          */
3114         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3115                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3116                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3117                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3118                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3119                 hdev->rst_stats.imp_rst_cnt++;
3120                 return HCLGE_VECTOR0_EVENT_RST;
3121         }
3122
3123         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3124                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3125                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3126                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3127                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3128                 hdev->rst_stats.global_rst_cnt++;
3129                 return HCLGE_VECTOR0_EVENT_RST;
3130         }
3131
3132         /* check for vector0 msix event source */
3133         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3134                 *clearval = msix_src_reg;
3135                 return HCLGE_VECTOR0_EVENT_ERR;
3136         }
3137
3138         /* check for vector0 mailbox(=CMDQ RX) event source */
3139         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3140                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3141                 *clearval = cmdq_src_reg;
3142                 return HCLGE_VECTOR0_EVENT_MBX;
3143         }
3144
3145         /* print other vector0 event source */
3146         dev_info(&hdev->pdev->dev,
3147                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3148                  cmdq_src_reg, msix_src_reg);
3149         *clearval = msix_src_reg;
3150
3151         return HCLGE_VECTOR0_EVENT_OTHER;
3152 }
3153
3154 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3155                                     u32 regclr)
3156 {
3157         switch (event_type) {
3158         case HCLGE_VECTOR0_EVENT_RST:
3159                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3160                 break;
3161         case HCLGE_VECTOR0_EVENT_MBX:
3162                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3163                 break;
3164         default:
3165                 break;
3166         }
3167 }
3168
3169 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3170 {
3171         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3172                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3173                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3174                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3175         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3176 }
3177
3178 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3179 {
3180         writel(enable ? 1 : 0, vector->addr);
3181 }
3182
3183 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3184 {
3185         struct hclge_dev *hdev = data;
3186         u32 clearval = 0;
3187         u32 event_cause;
3188
3189         hclge_enable_vector(&hdev->misc_vector, false);
3190         event_cause = hclge_check_event_cause(hdev, &clearval);
3191
3192         /* vector 0 interrupt is shared with reset and mailbox source events. */
3193         switch (event_cause) {
3194         case HCLGE_VECTOR0_EVENT_ERR:
3195                 /* we do not know what type of reset is required now. This could
3196                  * only be decided after we fetch the type of errors which
3197                  * caused this event. Therefore, we will do the following for now:
3198                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
3199                  *    type of reset to be used is deferred.
3200                  * 2. Schedule the reset service task.
3201                  * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3202                  *    will fetch the correct type of reset. This would be done
3203                  *    by first decoding the types of errors.
3204                  */
3205                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3206                 fallthrough;
3207         case HCLGE_VECTOR0_EVENT_RST:
3208                 hclge_reset_task_schedule(hdev);
3209                 break;
3210         case HCLGE_VECTOR0_EVENT_MBX:
3211                 /* If we are here, then
3212                  * 1. either we are not handling any mbx task and none is
3213                  *    scheduled,
3214                  *                        OR
3215                  * 2. we could be handling a mbx task but nothing more is
3216                  *    scheduled.
3217                  * In both cases we should schedule the mbx task, as there are
3218                  * more mbx messages reported by this interrupt.
3219                  */
3220                 hclge_mbx_task_schedule(hdev);
3221                 break;
3222         default:
3223                 dev_warn(&hdev->pdev->dev,
3224                          "received unknown or unhandled event of vector0\n");
3225                 break;
3226         }
3227
3228         hclge_clear_event_cause(hdev, event_cause, clearval);
3229
3230         /* Enable the interrupt if it is not caused by reset. When
3231          * clearval is equal to 0, the interrupt status may have been
3232          * cleared by hardware before the driver reads the status register.
3233          * In this case, the vector0 interrupt should also be enabled.
3234          */
3235         if (!clearval ||
3236             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3237                 hclge_enable_vector(&hdev->misc_vector, true);
3238         }
3239
3240         return IRQ_HANDLED;
3241 }
3242
3243 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3244 {
3245         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3246                 dev_warn(&hdev->pdev->dev,
3247                          "vector(vector_id %d) has been freed.\n", vector_id);
3248                 return;
3249         }
3250
3251         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3252         hdev->num_msi_left += 1;
3253         hdev->num_msi_used -= 1;
3254 }
3255
3256 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3257 {
3258         struct hclge_misc_vector *vector = &hdev->misc_vector;
3259
3260         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3261
3262         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3263         hdev->vector_status[0] = 0;
3264
3265         hdev->num_msi_left -= 1;
3266         hdev->num_msi_used += 1;
3267 }
3268
3269 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3270                                       const cpumask_t *mask)
3271 {
3272         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3273                                               affinity_notify);
3274
3275         cpumask_copy(&hdev->affinity_mask, mask);
3276 }
3277
3278 static void hclge_irq_affinity_release(struct kref *ref)
3279 {
3280 }
3281
3282 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3283 {
3284         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3285                               &hdev->affinity_mask);
3286
3287         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3288         hdev->affinity_notify.release = hclge_irq_affinity_release;
3289         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3290                                   &hdev->affinity_notify);
3291 }
3292
3293 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3294 {
3295         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3296         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3297 }
3298
3299 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3300 {
3301         int ret;
3302
3303         hclge_get_misc_vector(hdev);
3304
3305         /* this is explicitly freed in hclge_misc_irq_uninit() */
3306         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3307                  HCLGE_NAME, pci_name(hdev->pdev));
3308         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3309                           0, hdev->misc_vector.name, hdev);
3310         if (ret) {
3311                 hclge_free_vector(hdev, 0);
3312                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3313                         hdev->misc_vector.vector_irq);
3314         }
3315
3316         return ret;
3317 }
3318
3319 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3320 {
3321         free_irq(hdev->misc_vector.vector_irq, hdev);
3322         hclge_free_vector(hdev, 0);
3323 }
3324
3325 int hclge_notify_client(struct hclge_dev *hdev,
3326                         enum hnae3_reset_notify_type type)
3327 {
3328         struct hnae3_client *client = hdev->nic_client;
3329         u16 i;
3330
3331         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3332                 return 0;
3333
3334         if (!client->ops->reset_notify)
3335                 return -EOPNOTSUPP;
3336
3337         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3338                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3339                 int ret;
3340
3341                 ret = client->ops->reset_notify(handle, type);
3342                 if (ret) {
3343                         dev_err(&hdev->pdev->dev,
3344                                 "notify nic client failed %d(%d)\n", type, ret);
3345                         return ret;
3346                 }
3347         }
3348
3349         return 0;
3350 }
3351
3352 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3353                                     enum hnae3_reset_notify_type type)
3354 {
3355         struct hnae3_client *client = hdev->roce_client;
3356         int ret;
3357         u16 i;
3358
3359         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3360                 return 0;
3361
3362         if (!client->ops->reset_notify)
3363                 return -EOPNOTSUPP;
3364
3365         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3366                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3367
3368                 ret = client->ops->reset_notify(handle, type);
3369                 if (ret) {
3370                         dev_err(&hdev->pdev->dev,
3371                                 "notify roce client failed %d(%d)\n",
3372                                 type, ret);
3373                         return ret;
3374                 }
3375         }
3376
3377         return 0;
3378 }
3379
3380 static int hclge_reset_wait(struct hclge_dev *hdev)
3381 {
3382 #define HCLGE_RESET_WAIT_MS     100
3383 #define HCLGE_RESET_WAIT_CNT    350
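/* worst case the loop below polls for
 * HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WAIT_MS = 350 * 100 ms = 35 s
 */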
3384
3385         u32 val, reg, reg_bit;
3386         u32 cnt = 0;
3387
3388         switch (hdev->reset_type) {
3389         case HNAE3_IMP_RESET:
3390                 reg = HCLGE_GLOBAL_RESET_REG;
3391                 reg_bit = HCLGE_IMP_RESET_BIT;
3392                 break;
3393         case HNAE3_GLOBAL_RESET:
3394                 reg = HCLGE_GLOBAL_RESET_REG;
3395                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3396                 break;
3397         case HNAE3_FUNC_RESET:
3398                 reg = HCLGE_FUN_RST_ING;
3399                 reg_bit = HCLGE_FUN_RST_ING_B;
3400                 break;
3401         default:
3402                 dev_err(&hdev->pdev->dev,
3403                         "Wait for unsupported reset type: %d\n",
3404                         hdev->reset_type);
3405                 return -EINVAL;
3406         }
3407
3408         val = hclge_read_dev(&hdev->hw, reg);
3409         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3410                 msleep(HCLGE_RESET_WAIT_MS);
3411                 val = hclge_read_dev(&hdev->hw, reg);
3412                 cnt++;
3413         }
3414
3415         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3416                 dev_warn(&hdev->pdev->dev,
3417                          "Wait for reset timed out: %d\n", hdev->reset_type);
3418                 return -EBUSY;
3419         }
3420
3421         return 0;
3422 }
3423
3424 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3425 {
3426         struct hclge_vf_rst_cmd *req;
3427         struct hclge_desc desc;
3428
3429         req = (struct hclge_vf_rst_cmd *)desc.data;
3430         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3431         req->dest_vfid = func_id;
3432
3433         if (reset)
3434                 req->vf_rst = 0x1;
3435
3436         return hclge_cmd_send(&hdev->hw, &desc, 1);
3437 }
3438
3439 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3440 {
3441         int i;
3442
3443         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3444                 struct hclge_vport *vport = &hdev->vport[i];
3445                 int ret;
3446
3447                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3448                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3449                 if (ret) {
3450                         dev_err(&hdev->pdev->dev,
3451                                 "set vf(%u) rst failed %d!\n",
3452                                 vport->vport_id, ret);
3453                         return ret;
3454                 }
3455
3456                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3457                         continue;
3458
3459                 /* Inform VF to process the reset.
3460                  * hclge_inform_reset_assert_to_vf may fail if VF
3461                  * driver is not loaded.
3462                  */
3463                 ret = hclge_inform_reset_assert_to_vf(vport);
3464                 if (ret)
3465                         dev_warn(&hdev->pdev->dev,
3466                                  "inform reset to vf(%u) failed %d!\n",
3467                                  vport->vport_id, ret);
3468         }
3469
3470         return 0;
3471 }
3472
3473 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3474 {
3475         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3476             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3477             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3478                 return;
3479
3480         hclge_mbx_handler(hdev);
3481
3482         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3483 }
3484
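/* Polls the firmware until every VF reports it has stopped its IO,
 * handling pending mailbox requests in between so that VFs can actually
 * bring their netdevs down. With the current defaults this retries every
 * HCLGE_PF_RESET_SYNC_TIME ms, up to HCLGE_PF_RESET_SYNC_CNT times
 * (roughly 30 s), before giving up with a warning.
 */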
3485 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3486 {
3487         struct hclge_pf_rst_sync_cmd *req;
3488         struct hclge_desc desc;
3489         int cnt = 0;
3490         int ret;
3491
3492         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3493         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3494
3495         do {
3496                 /* the VF needs to down its netdev via mbx during PF or FLR reset */
3497                 hclge_mailbox_service_task(hdev);
3498
3499                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3500                 /* for compatibility with old firmware, wait
3501                  * 100 ms for the VF to stop its IO
3502                  */
3503                 if (ret == -EOPNOTSUPP) {
3504                         msleep(HCLGE_RESET_SYNC_TIME);
3505                         return;
3506                 } else if (ret) {
3507                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3508                                  ret);
3509                         return;
3510                 } else if (req->all_vf_ready) {
3511                         return;
3512                 }
3513                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3514                 hclge_cmd_reuse_desc(&desc, true);
3515         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3516
3517         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3518 }
3519
3520 void hclge_report_hw_error(struct hclge_dev *hdev,
3521                            enum hnae3_hw_error_type type)
3522 {
3523         struct hnae3_client *client = hdev->nic_client;
3524         u16 i;
3525
3526         if (!client || !client->ops->process_hw_error ||
3527             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3528                 return;
3529
3530         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3531                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3532 }
3533
3534 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3535 {
3536         u32 reg_val;
3537
3538         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3539         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3540                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3541                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3542                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3543         }
3544
3545         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3546                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3547                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3548                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3549         }
3550 }
3551
3552 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3553 {
3554         struct hclge_desc desc;
3555         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3556         int ret;
3557
3558         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3559         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3560         req->fun_reset_vfid = func_id;
3561
3562         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3563         if (ret)
3564                 dev_err(&hdev->pdev->dev,
3565                         "send function reset cmd fail, status = %d\n", ret);
3566
3567         return ret;
3568 }
3569
3570 static void hclge_do_reset(struct hclge_dev *hdev)
3571 {
3572         struct hnae3_handle *handle = &hdev->vport[0].nic;
3573         struct pci_dev *pdev = hdev->pdev;
3574         u32 val;
3575
3576         if (hclge_get_hw_reset_stat(handle)) {
3577                 dev_info(&pdev->dev, "hardware reset not finished\n");
3578                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3579                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3580                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3581                 return;
3582         }
3583
3584         switch (hdev->reset_type) {
3585         case HNAE3_GLOBAL_RESET:
3586                 dev_info(&pdev->dev, "global reset requested\n");
3587                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3588                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3589                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3590                 break;
3591         case HNAE3_FUNC_RESET:
3592                 dev_info(&pdev->dev, "PF reset requested\n");
3593                 /* schedule again to check later */
3594                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3595                 hclge_reset_task_schedule(hdev);
3596                 break;
3597         default:
3598                 dev_warn(&pdev->dev,
3599                          "unsupported reset type: %d\n", hdev->reset_type);
3600                 break;
3601         }
3602 }
3603
3604 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3605                                                    unsigned long *addr)
3606 {
3607         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3608         struct hclge_dev *hdev = ae_dev->priv;
3609
3610         /* first, resolve any unknown reset type to the known type(s) */
3611         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3612                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3613                                         HCLGE_MISC_VECTOR_INT_STS);
3614                 /* we will intentionally ignore any errors from this function
3615                  * as we will end up in *some* reset request in any case
3616                  */
3617                 if (hclge_handle_hw_msix_error(hdev, addr))
3618                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3619                                  msix_sts_reg);
3620
3621                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3622                 /* We deferred the clearing of the error event which caused
3623                  * the interrupt since it was not possible to do that in
3624                  * interrupt context (and this is the reason we introduced
3625                  * the new UNKNOWN reset type). Now that the errors have been
3626                  * handled and cleared in hardware, we can safely enable
3627                  * interrupts. This is an exception to the norm.
3628                  */
3629                 hclge_enable_vector(&hdev->misc_vector, true);
3630         }
3631
3632         /* return the highest priority reset level amongst all */
3633         if (test_bit(HNAE3_IMP_RESET, addr)) {
3634                 rst_level = HNAE3_IMP_RESET;
3635                 clear_bit(HNAE3_IMP_RESET, addr);
3636                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3637                 clear_bit(HNAE3_FUNC_RESET, addr);
3638         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3639                 rst_level = HNAE3_GLOBAL_RESET;
3640                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3641                 clear_bit(HNAE3_FUNC_RESET, addr);
3642         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3643                 rst_level = HNAE3_FUNC_RESET;
3644                 clear_bit(HNAE3_FUNC_RESET, addr);
3645         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3646                 rst_level = HNAE3_FLR_RESET;
3647                 clear_bit(HNAE3_FLR_RESET, addr);
3648         }
3649
3650         if (hdev->reset_type != HNAE3_NONE_RESET &&
3651             rst_level < hdev->reset_type)
3652                 return HNAE3_NONE_RESET;
3653
3654         return rst_level;
3655 }
3656
3657 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3658 {
3659         u32 clearval = 0;
3660
3661         switch (hdev->reset_type) {
3662         case HNAE3_IMP_RESET:
3663                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3664                 break;
3665         case HNAE3_GLOBAL_RESET:
3666                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3667                 break;
3668         default:
3669                 break;
3670         }
3671
3672         if (!clearval)
3673                 return;
3674
3675         /* For revision 0x20, the reset interrupt source
3676          * can only be cleared after the hardware reset is done
3677          */
3678         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3679                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3680                                 clearval);
3681
3682         hclge_enable_vector(&hdev->misc_vector, true);
3683 }
3684
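/* The reset handshake sets or clears the HCLGE_NIC_SW_RST_RDY bit in the
 * CSQ depth register. Judging by its callers, this is how the driver
 * tells the hardware that its preparatory work is done: asserted before
 * the reset and cleared again once re-initialization has completed.
 */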
3685 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3686 {
3687         u32 reg_val;
3688
3689         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3690         if (enable)
3691                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3692         else
3693                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3694
3695         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3696 }
3697
3698 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3699 {
3700         int ret;
3701
3702         ret = hclge_set_all_vf_rst(hdev, true);
3703         if (ret)
3704                 return ret;
3705
3706         hclge_func_reset_sync_vf(hdev);
3707
3708         return 0;
3709 }
3710
3711 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3712 {
3713         u32 reg_val;
3714         int ret = 0;
3715
3716         switch (hdev->reset_type) {
3717         case HNAE3_FUNC_RESET:
3718                 ret = hclge_func_reset_notify_vf(hdev);
3719                 if (ret)
3720                         return ret;
3721
3722                 ret = hclge_func_reset_cmd(hdev, 0);
3723                 if (ret) {
3724                         dev_err(&hdev->pdev->dev,
3725                                 "asserting function reset fail %d!\n", ret);
3726                         return ret;
3727                 }
3728
3729                 /* After performing PF reset, it is not necessary to do the
3730                  * mailbox handling or send any command to firmware, because
3731                  * any mailbox handling or command to firmware is only valid
3732                  * after hclge_cmd_init is called.
3733                  */
3734                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3735                 hdev->rst_stats.pf_rst_cnt++;
3736                 break;
3737         case HNAE3_FLR_RESET:
3738                 ret = hclge_func_reset_notify_vf(hdev);
3739                 if (ret)
3740                         return ret;
3741                 break;
3742         case HNAE3_IMP_RESET:
3743                 hclge_handle_imp_error(hdev);
3744                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3745                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3746                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3747                 break;
3748         default:
3749                 break;
3750         }
3751
3752         /* inform hardware that preparatory work is done */
3753         msleep(HCLGE_RESET_SYNC_TIME);
3754         hclge_reset_handshake(hdev, true);
3755         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3756
3757         return ret;
3758 }
3759
3760 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3761 {
3762 #define MAX_RESET_FAIL_CNT 5
3763
3764         if (hdev->reset_pending) {
3765                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3766                          hdev->reset_pending);
3767                 return true;
3768         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3769                    HCLGE_RESET_INT_M) {
3770                 dev_info(&hdev->pdev->dev,
3771                          "reset failed because of a new reset interrupt\n");
3772                 hclge_clear_reset_cause(hdev);
3773                 return false;
3774         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3775                 hdev->rst_stats.reset_fail_cnt++;
3776                 set_bit(hdev->reset_type, &hdev->reset_pending);
3777                 dev_info(&hdev->pdev->dev,
3778                          "re-schedule reset task(%u)\n",
3779                          hdev->rst_stats.reset_fail_cnt);
3780                 return true;
3781         }
3782
3783         hclge_clear_reset_cause(hdev);
3784
3785         /* recover the handshake status when reset fails */
3786         hclge_reset_handshake(hdev, true);
3787
3788         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3789
3790         hclge_dbg_dump_rst_info(hdev);
3791
3792         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3793
3794         return false;
3795 }
3796
3797 static int hclge_set_rst_done(struct hclge_dev *hdev)
3798 {
3799         struct hclge_pf_rst_done_cmd *req;
3800         struct hclge_desc desc;
3801         int ret;
3802
3803         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3804         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3805         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3806
3807         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3808         /* To be compatible with the old firmware, which does not support
3809          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3810          * return success
3811          */
3812         if (ret == -EOPNOTSUPP) {
3813                 dev_warn(&hdev->pdev->dev,
3814                          "current firmware does not support command(0x%x)!\n",
3815                          HCLGE_OPC_PF_RST_DONE);
3816                 return 0;
3817         } else if (ret) {
3818                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3819                         ret);
3820         }
3821
3822         return ret;
3823 }
3824
3825 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3826 {
3827         int ret = 0;
3828
3829         switch (hdev->reset_type) {
3830         case HNAE3_FUNC_RESET:
3831         case HNAE3_FLR_RESET:
3832                 ret = hclge_set_all_vf_rst(hdev, false);
3833                 break;
3834         case HNAE3_GLOBAL_RESET:
3835         case HNAE3_IMP_RESET:
3836                 ret = hclge_set_rst_done(hdev);
3837                 break;
3838         default:
3839                 break;
3840         }
3841
3842         /* clear the handshake status after re-initialization is done */
3843         hclge_reset_handshake(hdev, false);
3844
3845         return ret;
3846 }
3847
3848 static int hclge_reset_stack(struct hclge_dev *hdev)
3849 {
3850         int ret;
3851
3852         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3853         if (ret)
3854                 return ret;
3855
3856         ret = hclge_reset_ae_dev(hdev->ae_dev);
3857         if (ret)
3858                 return ret;
3859
3860         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3861 }
3862
3863 static int hclge_reset_prepare(struct hclge_dev *hdev)
3864 {
3865         int ret;
3866
3867         hdev->rst_stats.reset_cnt++;
3868         /* perform reset of the stack & ae device for a client */
3869         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3870         if (ret)
3871                 return ret;
3872
3873         rtnl_lock();
3874         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3875         rtnl_unlock();
3876         if (ret)
3877                 return ret;
3878
3879         return hclge_reset_prepare_wait(hdev);
3880 }
3881
3882 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3883 {
3884         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3885         enum hnae3_reset_type reset_level;
3886         int ret;
3887
3888         hdev->rst_stats.hw_reset_done_cnt++;
3889
3890         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3891         if (ret)
3892                 return ret;
3893
3894         rtnl_lock();
3895         ret = hclge_reset_stack(hdev);
3896         rtnl_unlock();
3897         if (ret)
3898                 return ret;
3899
3900         hclge_clear_reset_cause(hdev);
3901
3902         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3903         /* ignore the RoCE notify error if reset has already failed
3904          * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3905          */
3906         if (ret &&
3907             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3908                 return ret;
3909
3910         ret = hclge_reset_prepare_up(hdev);
3911         if (ret)
3912                 return ret;
3913
3914         rtnl_lock();
3915         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3916         rtnl_unlock();
3917         if (ret)
3918                 return ret;
3919
3920         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3921         if (ret)
3922                 return ret;
3923
3924         hdev->last_reset_time = jiffies;
3925         hdev->rst_stats.reset_fail_cnt = 0;
3926         hdev->rst_stats.reset_done_cnt++;
3927         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3928
3929         /* if default_reset_request has a higher-level reset request,
3930          * it should be handled as soon as possible, since some errors
3931          * need this kind of reset to be fixed.
3932          */
3933         reset_level = hclge_get_reset_level(ae_dev,
3934                                             &hdev->default_reset_request);
3935         if (reset_level != HNAE3_NONE_RESET)
3936                 set_bit(reset_level, &hdev->reset_request);
3937
3938         return 0;
3939 }
3940
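/* Top-level reset flow: prepare (notify clients and assert the reset),
 * wait for the hardware to finish, then rebuild the stack. Any failure
 * falls through to the error handler, which decides whether the reset
 * task should be re-scheduled for another attempt.
 */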
3941 static void hclge_reset(struct hclge_dev *hdev)
3942 {
3943         if (hclge_reset_prepare(hdev))
3944                 goto err_reset;
3945
3946         if (hclge_reset_wait(hdev))
3947                 goto err_reset;
3948
3949         if (hclge_reset_rebuild(hdev))
3950                 goto err_reset;
3951
3952         return;
3953
3954 err_reset:
3955         if (hclge_reset_err_handle(hdev))
3956                 hclge_reset_task_schedule(hdev);
3957 }
3958
3959 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3960 {
3961         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3962         struct hclge_dev *hdev = ae_dev->priv;
3963
3964         /* We might end up getting called broadly because of the 2 cases below:
3965          * 1. A recoverable error was conveyed through APEI and the only way
3966          *    to restore normalcy is to reset.
3967          * 2. A new reset request from the stack due to a timeout.
3968          *
3969          * For the first case, the error event might not have an ae handle
3970          * available. Check if this is a new reset request and we are not here
3971          * just because the last reset attempt did not succeed and the watchdog
3972          * hit us again. We will know this if the last reset request did not
3973          * occur very recently (watchdog timer = 5*HZ; let us check after a
3974          * sufficiently large time, say 4*5*HZ). In case of a new request we
3975          * reset the "reset level" to PF reset. And if it is a repeat of the
3976          * most recent reset request, then we want to throttle it. Therefore,
3977          * we will not allow it again before 3*HZ has passed.
3978          */
3979         if (!handle)
3980                 handle = &hdev->vport[0].nic;
3981
3982         if (time_before(jiffies, (hdev->last_reset_time +
3983                                   HCLGE_RESET_INTERVAL))) {
3984                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3985                 return;
3986         } else if (hdev->default_reset_request) {
3987                 hdev->reset_level =
3988                         hclge_get_reset_level(ae_dev,
3989                                               &hdev->default_reset_request);
3990         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3991                 hdev->reset_level = HNAE3_FUNC_RESET;
3992         }
3993
3994         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3995                  hdev->reset_level);
3996
3997         /* request reset & schedule reset task */
3998         set_bit(hdev->reset_level, &hdev->reset_request);
3999         hclge_reset_task_schedule(hdev);
4000
4001         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4002                 hdev->reset_level++;
4003 }
4004
4005 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4006                                         enum hnae3_reset_type rst_type)
4007 {
4008         struct hclge_dev *hdev = ae_dev->priv;
4009
4010         set_bit(rst_type, &hdev->default_reset_request);
4011 }
4012
4013 static void hclge_reset_timer(struct timer_list *t)
4014 {
4015         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4016
4017         /* if default_reset_request has no value, it means that this reset
4018          * request has already been handled, so just return here
4019          */
4020         if (!hdev->default_reset_request)
4021                 return;
4022
4023         dev_info(&hdev->pdev->dev,
4024                  "triggering reset in reset timer\n");
4025         hclge_reset_event(hdev->pdev, NULL);
4026 }
4027
4028 static void hclge_reset_subtask(struct hclge_dev *hdev)
4029 {
4030         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4031
4032         /* check if there is any ongoing reset in the hardware. This status
4033          * can be checked from reset_pending. If there is, then we need to
4034          * wait for the hardware to complete the reset.
4035          *    a. If we are able to figure out in reasonable time that the
4036          *       hardware has fully reset, then we can proceed with the
4037          *       driver and client reset.
4038          *    b. else, we can come back later to check this status, so
4039          *       re-schedule now.
4040          */
4041         hdev->last_reset_time = jiffies;
4042         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4043         if (hdev->reset_type != HNAE3_NONE_RESET)
4044                 hclge_reset(hdev);
4045
4046         /* check if we got any *new* reset requests to be honored */
4047         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4048         if (hdev->reset_type != HNAE3_NONE_RESET)
4049                 hclge_do_reset(hdev);
4050
4051         hdev->reset_type = HNAE3_NONE_RESET;
4052 }
4053
4054 static void hclge_reset_service_task(struct hclge_dev *hdev)
4055 {
4056         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4057                 return;
4058
4059         down(&hdev->reset_sem);
4060         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4061
4062         hclge_reset_subtask(hdev);
4063
4064         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4065         up(&hdev->reset_sem);
4066 }
4067
4068 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4069 {
4070         int i;
4071
4072         /* start from vport 1, since vport 0 (the PF) is always alive */
4073         for (i = 1; i < hdev->num_alloc_vport; i++) {
4074                 struct hclge_vport *vport = &hdev->vport[i];
4075
4076                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4077                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4078
4079                 /* If the vf is not alive, set mps to the default value */
4080                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4081                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4082         }
4083 }
4084
4085 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4086 {
4087         unsigned long delta = round_jiffies_relative(HZ);
4088
4089         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4090                 return;
4091
4092         /* Always handle the link updating to make sure the link state is
4093          * updated when it is triggered by mbx.
4094          */
4095         hclge_update_link_status(hdev);
4096         hclge_sync_mac_table(hdev);
4097         hclge_sync_promisc_mode(hdev);
4098
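        /* Throttle: if the last pass ran less than a second ago, skip the
         * bookkeeping below and re-schedule for the remainder of that
         * second (presumably to bound the cost of this task).
         */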
4099         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4100                 delta = jiffies - hdev->last_serv_processed;
4101
4102                 if (delta < round_jiffies_relative(HZ)) {
4103                         delta = round_jiffies_relative(HZ) - delta;
4104                         goto out;
4105                 }
4106         }
4107
4108         hdev->serv_processed_cnt++;
4109         hclge_update_vport_alive(hdev);
4110
4111         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4112                 hdev->last_serv_processed = jiffies;
4113                 goto out;
4114         }
4115
4116         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4117                 hclge_update_stats_for_all(hdev);
4118
4119         hclge_update_port_info(hdev);
4120         hclge_sync_vlan_filter(hdev);
4121
4122         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4123                 hclge_rfs_filter_expire(hdev);
4124
4125         hdev->last_serv_processed = jiffies;
4126
4127 out:
4128         hclge_task_schedule(hdev, delta);
4129 }
4130
4131 static void hclge_service_task(struct work_struct *work)
4132 {
4133         struct hclge_dev *hdev =
4134                 container_of(work, struct hclge_dev, service_task.work);
4135
4136         hclge_reset_service_task(hdev);
4137         hclge_mailbox_service_task(hdev);
4138         hclge_periodic_service_task(hdev);
4139
4140         /* Handle reset and mbx again in case the periodic task delays
4141          * their handling by calling hclge_task_schedule() in
4142          * hclge_periodic_service_task().
4143          */
4144         hclge_reset_service_task(hdev);
4145         hclge_mailbox_service_task(hdev);
4146 }
4147
4148 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4149 {
4150         /* VF handle has no client */
4151         if (!handle->client)
4152                 return container_of(handle, struct hclge_vport, nic);
4153         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4154                 return container_of(handle, struct hclge_vport, roce);
4155         else
4156                 return container_of(handle, struct hclge_vport, nic);
4157 }
4158
4159 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4160                                   struct hnae3_vector_info *vector_info)
4161 {
4162 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4163
4164         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4165
4166         /* need an extended offset to config vectors >= 64 */
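        /* For example, with HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 = 64, idx 1..64
         * fall in the normal window at HCLGE_VECTOR_REG_BASE +
         * (idx - 1) * HCLGE_VECTOR_REG_OFFSET, while idx 65 maps to
         * HCLGE_VECTOR_EXT_REG_BASE + 1 * HCLGE_VECTOR_REG_OFFSET_H +
         * 0 * HCLGE_VECTOR_REG_OFFSET.
         */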
4167         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4168                 vector_info->io_addr = hdev->hw.io_base +
4169                                 HCLGE_VECTOR_REG_BASE +
4170                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4171         else
4172                 vector_info->io_addr = hdev->hw.io_base +
4173                                 HCLGE_VECTOR_EXT_REG_BASE +
4174                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4175                                 HCLGE_VECTOR_REG_OFFSET_H +
4176                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4177                                 HCLGE_VECTOR_REG_OFFSET;
4178
4179         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4180         hdev->vector_irq[idx] = vector_info->vector;
4181 }
4182
4183 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4184                             struct hnae3_vector_info *vector_info)
4185 {
4186         struct hclge_vport *vport = hclge_get_vport(handle);
4187         struct hnae3_vector_info *vector = vector_info;
4188         struct hclge_dev *hdev = vport->back;
4189         int alloc = 0;
4190         u16 i = 0;
4191         u16 j;
4192
4193         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4194         vector_num = min(hdev->num_msi_left, vector_num);
4195
4196         for (j = 0; j < vector_num; j++) {
4197                 while (++i < hdev->num_nic_msi) {
4198                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4199                                 hclge_get_vector_info(hdev, i, vector);
4200                                 vector++;
4201                                 alloc++;
4202
4203                                 break;
4204                         }
4205                 }
4206         }
4207         hdev->num_msi_left -= alloc;
4208         hdev->num_msi_used += alloc;
4209
4210         return alloc;
4211 }
4212
4213 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4214 {
4215         int i;
4216
4217         for (i = 0; i < hdev->num_msi; i++)
4218                 if (vector == hdev->vector_irq[i])
4219                         return i;
4220
4221         return -EINVAL;
4222 }
4223
4224 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4225 {
4226         struct hclge_vport *vport = hclge_get_vport(handle);
4227         struct hclge_dev *hdev = vport->back;
4228         int vector_id;
4229
4230         vector_id = hclge_get_vector_index(hdev, vector);
4231         if (vector_id < 0) {
4232                 dev_err(&hdev->pdev->dev,
4233                         "Get vector index fail. vector = %d\n", vector);
4234                 return vector_id;
4235         }
4236
4237         hclge_free_vector(hdev, vector_id);
4238
4239         return 0;
4240 }
4241
4242 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4243 {
4244         return HCLGE_RSS_KEY_SIZE;
4245 }
4246
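/* The RSS hash key is longer than a single descriptor can carry, so it is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes; hash_config carries
 * both the hash algorithm and the chunk index (key_offset) of each command.
 */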
4247 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4248                                   const u8 hfunc, const u8 *key)
4249 {
4250         struct hclge_rss_config_cmd *req;
4251         unsigned int key_offset = 0;
4252         struct hclge_desc desc;
4253         int key_counts;
4254         int key_size;
4255         int ret;
4256
4257         key_counts = HCLGE_RSS_KEY_SIZE;
4258         req = (struct hclge_rss_config_cmd *)desc.data;
4259
4260         while (key_counts) {
4261                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4262                                            false);
4263
4264                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4265                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4266
4267                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4268                 memcpy(req->hash_key,
4269                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4270
4271                 key_counts -= key_size;
4272                 key_offset++;
4273                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4274                 if (ret) {
4275                         dev_err(&hdev->pdev->dev,
4276                                 "Configure RSS config fail, status = %d\n",
4277                                 ret);
4278                         return ret;
4279                 }
4280         }
4281         return 0;
4282 }
4283
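/* Program the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries per
 * descriptor. Each queue id is split: the low 8 bits go into rss_qid_l[],
 * and the high-order bits are packed bit-by-bit into rss_qid_h[].
 */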
4284 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4285 {
4286         struct hclge_rss_indirection_table_cmd *req;
4287         struct hclge_desc desc;
4288         int rss_cfg_tbl_num;
4289         u8 rss_msb_oft;
4290         u8 rss_msb_val;
4291         int ret;
4292         u16 qid;
4293         int i;
4294         u32 j;
4295
4296         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4297         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4298                           HCLGE_RSS_CFG_TBL_SIZE;
4299
4300         for (i = 0; i < rss_cfg_tbl_num; i++) {
4301                 hclge_cmd_setup_basic_desc
4302                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4303
4304                 req->start_table_index =
4305                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4306                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4307                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4308                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4309                         req->rss_qid_l[j] = qid & 0xff;
4310                         rss_msb_oft =
4311                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4312                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4313                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4314                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4315                 }
4316                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4317                 if (ret) {
4318                         dev_err(&hdev->pdev->dev,
4319                                 "Configure rss indir table fail,status = %d\n",
4320                                 ret);
4321                         return ret;
4322                 }
4323         }
4324         return 0;
4325 }
4326
4327 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4328                                  u16 *tc_size, u16 *tc_offset)
4329 {
4330         struct hclge_rss_tc_mode_cmd *req;
4331         struct hclge_desc desc;
4332         int ret;
4333         int i;
4334
4335         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4336         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4337
4338         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4339                 u16 mode = 0;
4340
4341                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4342                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4343                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4344                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4345                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4346                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4347                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4348
4349                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4350         }
4351
4352         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4353         if (ret)
4354                 dev_err(&hdev->pdev->dev,
4355                         "Configure rss tc mode fail, status = %d\n", ret);
4356
4357         return ret;
4358 }
4359
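/* Derive the hash type reported to the stack: if any L4 (port based) tuple
 * is enabled the hardware hashes on L4 headers, if only fragment tuples are
 * enabled it hashes on L3, otherwise no hash is reported.
 */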
4360 static void hclge_get_rss_type(struct hclge_vport *vport)
4361 {
4362         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4363             vport->rss_tuple_sets.ipv4_udp_en ||
4364             vport->rss_tuple_sets.ipv4_sctp_en ||
4365             vport->rss_tuple_sets.ipv6_tcp_en ||
4366             vport->rss_tuple_sets.ipv6_udp_en ||
4367             vport->rss_tuple_sets.ipv6_sctp_en)
4368                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4369         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4370                  vport->rss_tuple_sets.ipv6_fragment_en)
4371                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4372         else
4373                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4374 }
4375
4376 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4377 {
4378         struct hclge_rss_input_tuple_cmd *req;
4379         struct hclge_desc desc;
4380         int ret;
4381
4382         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4383
4384         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4385
4386         /* Get the tuple cfg from the PF */
4387         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4388         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4389         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4390         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4391         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4392         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4393         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4394         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4395         hclge_get_rss_type(&hdev->vport[0]);
4396         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4397         if (ret)
4398                 dev_err(&hdev->pdev->dev,
4399                         "Configure rss input fail, status = %d\n", ret);
4400         return ret;
4401 }
4402
4403 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4404                          u8 *key, u8 *hfunc)
4405 {
4406         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4407         struct hclge_vport *vport = hclge_get_vport(handle);
4408         int i;
4409
4410         /* Get hash algorithm */
4411         if (hfunc) {
4412                 switch (vport->rss_algo) {
4413                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4414                         *hfunc = ETH_RSS_HASH_TOP;
4415                         break;
4416                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4417                         *hfunc = ETH_RSS_HASH_XOR;
4418                         break;
4419                 default:
4420                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4421                         break;
4422                 }
4423         }
4424
4425         /* Get the RSS Key required by the user */
4426         if (key)
4427                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4428
4429         /* Get indirect table */
4430         if (indir)
4431                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4432                         indir[i] = vport->rss_indirection_tbl[i];
4433
4434         return 0;
4435 }
4436
4437 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4438                          const  u8 *key, const  u8 hfunc)
4439 {
4440         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4441         struct hclge_vport *vport = hclge_get_vport(handle);
4442         struct hclge_dev *hdev = vport->back;
4443         u8 hash_algo;
4444         int ret, i;
4445
4446         /* Set the RSS Hash Key if specified by the user */
4447         if (key) {
4448                 switch (hfunc) {
4449                 case ETH_RSS_HASH_TOP:
4450                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4451                         break;
4452                 case ETH_RSS_HASH_XOR:
4453                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4454                         break;
4455                 case ETH_RSS_HASH_NO_CHANGE:
4456                         hash_algo = vport->rss_algo;
4457                         break;
4458                 default:
4459                         return -EINVAL;
4460                 }
4461
4462                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4463                 if (ret)
4464                         return ret;
4465
4466                 /* Update the shadow RSS key with the user specified key */
4467                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4468                 vport->rss_algo = hash_algo;
4469         }
4470
4471         /* Update the shadow RSS indirection table with the user specified qids */
4472         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4473                 vport->rss_indirection_tbl[i] = indir[i];
4474
4475         /* Update the hardware */
4476         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4477 }
4478
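/* Translate the ethtool RXH_* flags into the driver's tuple bits. For SCTP
 * flows HCLGE_V_TAG_BIT is set as well, so the tag field is included in the
 * hash input.
 */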
4479 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4480 {
4481         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4482
4483         if (nfc->data & RXH_L4_B_2_3)
4484                 hash_sets |= HCLGE_D_PORT_BIT;
4485         else
4486                 hash_sets &= ~HCLGE_D_PORT_BIT;
4487
4488         if (nfc->data & RXH_IP_SRC)
4489                 hash_sets |= HCLGE_S_IP_BIT;
4490         else
4491                 hash_sets &= ~HCLGE_S_IP_BIT;
4492
4493         if (nfc->data & RXH_IP_DST)
4494                 hash_sets |= HCLGE_D_IP_BIT;
4495         else
4496                 hash_sets &= ~HCLGE_D_IP_BIT;
4497
4498         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4499                 hash_sets |= HCLGE_V_TAG_BIT;
4500
4501         return hash_sets;
4502 }
4503
4504 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4505                                     struct ethtool_rxnfc *nfc,
4506                                     struct hclge_rss_input_tuple_cmd *req)
4507 {
4508         struct hclge_dev *hdev = vport->back;
4509         u8 tuple_sets;
4510
4511         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4512         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4513         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4514         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4515         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4516         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4517         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4518         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4519
4520         tuple_sets = hclge_get_rss_hash_bits(nfc);
4521         switch (nfc->flow_type) {
4522         case TCP_V4_FLOW:
4523                 req->ipv4_tcp_en = tuple_sets;
4524                 break;
4525         case TCP_V6_FLOW:
4526                 req->ipv6_tcp_en = tuple_sets;
4527                 break;
4528         case UDP_V4_FLOW:
4529                 req->ipv4_udp_en = tuple_sets;
4530                 break;
4531         case UDP_V6_FLOW:
4532                 req->ipv6_udp_en = tuple_sets;
4533                 break;
4534         case SCTP_V4_FLOW:
4535                 req->ipv4_sctp_en = tuple_sets;
4536                 break;
4537         case SCTP_V6_FLOW:
4538                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4539                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4540                         return -EINVAL;
4541
4542                 req->ipv6_sctp_en = tuple_sets;
4543                 break;
4544         case IPV4_FLOW:
4545                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4546                 break;
4547         case IPV6_FLOW:
4548                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4549                 break;
4550         default:
4551                 return -EINVAL;
4552         }
4553
4554         return 0;
4555 }
4556
4557 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4558                                struct ethtool_rxnfc *nfc)
4559 {
4560         struct hclge_vport *vport = hclge_get_vport(handle);
4561         struct hclge_dev *hdev = vport->back;
4562         struct hclge_rss_input_tuple_cmd *req;
4563         struct hclge_desc desc;
4564         int ret;
4565
4566         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4567                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4568                 return -EINVAL;
4569
4570         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4571         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4572
4573         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4574         if (ret) {
4575                 dev_err(&hdev->pdev->dev,
4576                         "failed to init rss tuple cmd, ret = %d\n", ret);
4577                 return ret;
4578         }
4579
4580         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4581         if (ret) {
4582                 dev_err(&hdev->pdev->dev,
4583                         "Set rss tuple fail, status = %d\n", ret);
4584                 return ret;
4585         }
4586
4587         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4588         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4589         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4590         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4591         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4592         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4593         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4594         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4595         hclge_get_rss_type(vport);
4596         return 0;
4597 }
4598
4599 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4600                                      u8 *tuple_sets)
4601 {
4602         switch (flow_type) {
4603         case TCP_V4_FLOW:
4604                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4605                 break;
4606         case UDP_V4_FLOW:
4607                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4608                 break;
4609         case TCP_V6_FLOW:
4610                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4611                 break;
4612         case UDP_V6_FLOW:
4613                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4614                 break;
4615         case SCTP_V4_FLOW:
4616                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4617                 break;
4618         case SCTP_V6_FLOW:
4619                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4620                 break;
4621         case IPV4_FLOW:
4622         case IPV6_FLOW:
4623                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4624                 break;
4625         default:
4626                 return -EINVAL;
4627         }
4628
4629         return 0;
4630 }
4631
4632 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4633 {
4634         u64 tuple_data = 0;
4635
4636         if (tuple_sets & HCLGE_D_PORT_BIT)
4637                 tuple_data |= RXH_L4_B_2_3;
4638         if (tuple_sets & HCLGE_S_PORT_BIT)
4639                 tuple_data |= RXH_L4_B_0_1;
4640         if (tuple_sets & HCLGE_D_IP_BIT)
4641                 tuple_data |= RXH_IP_DST;
4642         if (tuple_sets & HCLGE_S_IP_BIT)
4643                 tuple_data |= RXH_IP_SRC;
4644
4645         return tuple_data;
4646 }
4647
4648 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4649                                struct ethtool_rxnfc *nfc)
4650 {
4651         struct hclge_vport *vport = hclge_get_vport(handle);
4652         u8 tuple_sets;
4653         int ret;
4654
4655         nfc->data = 0;
4656
4657         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4658         if (ret || !tuple_sets)
4659                 return ret;
4660
4661         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4662
4663         return 0;
4664 }
4665
4666 static int hclge_get_tc_size(struct hnae3_handle *handle)
4667 {
4668         struct hclge_vport *vport = hclge_get_vport(handle);
4669         struct hclge_dev *hdev = vport->back;
4670
4671         return hdev->pf_rss_size_max;
4672 }
4673
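/* Program the per-TC RSS mode. tc_size written to hardware is the log2 of
 * rss_size rounded up to a power of two, e.g. rss_size = 10 ->
 * roundup_pow_of_two() = 16 -> tc_size = ilog2(16) = 4.
 */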
4674 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4675 {
4676         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4677         struct hclge_vport *vport = hdev->vport;
4678         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4679         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4680         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4681         struct hnae3_tc_info *tc_info;
4682         u16 roundup_size;
4683         u16 rss_size;
4684         int i;
4685
4686         tc_info = &vport->nic.kinfo.tc_info;
4687         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4688                 rss_size = tc_info->tqp_count[i];
4689                 tc_valid[i] = 0;
4690
4691                 if (!(hdev->hw_tc_map & BIT(i)))
4692                         continue;
4693
4694                 /* tc_size set to hardware is the log2 of rss_size rounded
4695                  * up to a power of two; the actual queue size is limited
4696                  * by the indirection table.
4697                  */
4698                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4699                     rss_size == 0) {
4700                         dev_err(&hdev->pdev->dev,
4701                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4702                                 rss_size);
4703                         return -EINVAL;
4704                 }
4705
4706                 roundup_size = roundup_pow_of_two(rss_size);
4707                 roundup_size = ilog2(roundup_size);
4708
4709                 tc_valid[i] = 1;
4710                 tc_size[i] = roundup_size;
4711                 tc_offset[i] = tc_info->tqp_offset[i];
4712         }
4713
4714         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4715 }
4716
4717 int hclge_rss_init_hw(struct hclge_dev *hdev)
4718 {
4719         struct hclge_vport *vport = hdev->vport;
4720         u16 *rss_indir = vport[0].rss_indirection_tbl;
4721         u8 *key = vport[0].rss_hash_key;
4722         u8 hfunc = vport[0].rss_algo;
4723         int ret;
4724
4725         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4726         if (ret)
4727                 return ret;
4728
4729         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4730         if (ret)
4731                 return ret;
4732
4733         ret = hclge_set_rss_input_tuple(hdev);
4734         if (ret)
4735                 return ret;
4736
4737         return hclge_init_rss_tc_mode(hdev);
4738 }
4739
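/* Build the default indirection table by mapping entries round-robin over
 * the allocated RSS queues, e.g. with alloc_rss_size = 4 the table reads
 * 0, 1, 2, 3, 0, 1, 2, 3, ...
 */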
4740 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4741 {
4742         struct hclge_vport *vport = hdev->vport;
4743         int i, j;
4744
4745         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4746                 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4747                         vport[j].rss_indirection_tbl[i] =
4748                                 i % vport[j].alloc_rss_size;
4749         }
4750 }
4751
4752 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4753 {
4754         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4755         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4756         struct hclge_vport *vport = hdev->vport;
4757
4758         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4759                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4760
4761         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4762                 u16 *rss_ind_tbl;
4763
4764                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4765                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4766                 vport[i].rss_tuple_sets.ipv4_udp_en =
4767                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4768                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4769                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4770                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4771                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4772                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4773                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4774                 vport[i].rss_tuple_sets.ipv6_udp_en =
4775                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4776                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4777                         hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4778                         HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4779                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4780                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4781                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4782
4783                 vport[i].rss_algo = rss_algo;
4784
4785                 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4786                                            sizeof(*rss_ind_tbl), GFP_KERNEL);
4787                 if (!rss_ind_tbl)
4788                         return -ENOMEM;
4789
4790                 vport[i].rss_indirection_tbl = rss_ind_tbl;
4791                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4792                        HCLGE_RSS_KEY_SIZE);
4793         }
4794
4795         hclge_rss_indir_init_cfg(hdev);
4796
4797         return 0;
4798 }
4799
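/* Bind (en = true) or unbind (en = false) a chain of TX/RX rings to an
 * interrupt vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries fit in
 * one command descriptor; full descriptors are sent while walking the chain
 * and any remainder is flushed afterwards.
 */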
4800 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4801                                 int vector_id, bool en,
4802                                 struct hnae3_ring_chain_node *ring_chain)
4803 {
4804         struct hclge_dev *hdev = vport->back;
4805         struct hnae3_ring_chain_node *node;
4806         struct hclge_desc desc;
4807         struct hclge_ctrl_vector_chain_cmd *req =
4808                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4809         enum hclge_cmd_status status;
4810         enum hclge_opcode_type op;
4811         u16 tqp_type_and_id;
4812         int i;
4813
4814         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4815         hclge_cmd_setup_basic_desc(&desc, op, false);
4816         req->int_vector_id_l = hnae3_get_field(vector_id,
4817                                                HCLGE_VECTOR_ID_L_M,
4818                                                HCLGE_VECTOR_ID_L_S);
4819         req->int_vector_id_h = hnae3_get_field(vector_id,
4820                                                HCLGE_VECTOR_ID_H_M,
4821                                                HCLGE_VECTOR_ID_H_S);
4822
4823         i = 0;
4824         for (node = ring_chain; node; node = node->next) {
4825                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4826                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4827                                 HCLGE_INT_TYPE_S,
4828                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4829                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4830                                 HCLGE_TQP_ID_S, node->tqp_index);
4831                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4832                                 HCLGE_INT_GL_IDX_S,
4833                                 hnae3_get_field(node->int_gl_idx,
4834                                                 HNAE3_RING_GL_IDX_M,
4835                                                 HNAE3_RING_GL_IDX_S));
4836                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4837                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4838                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4839                         req->vfid = vport->vport_id;
4840
4841                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4842                         if (status) {
4843                                 dev_err(&hdev->pdev->dev,
4844                                         "Map TQP fail, status is %d.\n",
4845                                         status);
4846                                 return -EIO;
4847                         }
4848                         i = 0;
4849
4850                         hclge_cmd_setup_basic_desc(&desc,
4851                                                    op,
4852                                                    false);
4853                         req->int_vector_id_l =
4854                                 hnae3_get_field(vector_id,
4855                                                 HCLGE_VECTOR_ID_L_M,
4856                                                 HCLGE_VECTOR_ID_L_S);
4857                         req->int_vector_id_h =
4858                                 hnae3_get_field(vector_id,
4859                                                 HCLGE_VECTOR_ID_H_M,
4860                                                 HCLGE_VECTOR_ID_H_S);
4861                 }
4862         }
4863
4864         if (i > 0) {
4865                 req->int_cause_num = i;
4866                 req->vfid = vport->vport_id;
4867                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4868                 if (status) {
4869                         dev_err(&hdev->pdev->dev,
4870                                 "Map TQP fail, status is %d.\n", status);
4871                         return -EIO;
4872                 }
4873         }
4874
4875         return 0;
4876 }
4877
4878 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4879                                     struct hnae3_ring_chain_node *ring_chain)
4880 {
4881         struct hclge_vport *vport = hclge_get_vport(handle);
4882         struct hclge_dev *hdev = vport->back;
4883         int vector_id;
4884
4885         vector_id = hclge_get_vector_index(hdev, vector);
4886         if (vector_id < 0) {
4887                 dev_err(&hdev->pdev->dev,
4888                         "failed to get vector index. vector=%d\n", vector);
4889                 return vector_id;
4890         }
4891
4892         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4893 }
4894
4895 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4896                                        struct hnae3_ring_chain_node *ring_chain)
4897 {
4898         struct hclge_vport *vport = hclge_get_vport(handle);
4899         struct hclge_dev *hdev = vport->back;
4900         int vector_id, ret;
4901
4902         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4903                 return 0;
4904
4905         vector_id = hclge_get_vector_index(hdev, vector);
4906         if (vector_id < 0) {
4907                 dev_err(&handle->pdev->dev,
4908                         "Get vector index fail. ret =%d\n", vector_id);
4909                 return vector_id;
4910         }
4911
4912         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4913         if (ret)
4914                 dev_err(&handle->pdev->dev,
4915                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4916                         vector_id, ret);
4917
4918         return ret;
4919 }
4920
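/* Program promiscuous mode for one function. extend_promisc carries the
 * per-direction RX/TX bits used by newer devices, while promisc repeats the
 * settings in the legacy layout for DEVICE_VERSION_V1/2 compatibility. With
 * the limit-promisc private flag set, unicast TX promisc stays disabled.
 */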
4921 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4922                                       bool en_uc, bool en_mc, bool en_bc)
4923 {
4924         struct hclge_vport *vport = &hdev->vport[vf_id];
4925         struct hnae3_handle *handle = &vport->nic;
4926         struct hclge_promisc_cfg_cmd *req;
4927         struct hclge_desc desc;
4928         bool uc_tx_en = en_uc;
4929         u8 promisc_cfg = 0;
4930         int ret;
4931
4932         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4933
4934         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4935         req->vf_id = vf_id;
4936
4937         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4938                 uc_tx_en = false;
4939
4940         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4941         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4942         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4943         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4944         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4945         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4946         req->extend_promisc = promisc_cfg;
4947
4948         /* to be compatible with DEVICE_VERSION_V1/2 */
4949         promisc_cfg = 0;
4950         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4951         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4952         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4953         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4954         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4955         req->promisc = promisc_cfg;
4956
4957         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4958         if (ret)
4959                 dev_err(&hdev->pdev->dev,
4960                         "failed to set vport %u promisc mode, ret = %d.\n",
4961                         vf_id, ret);
4962
4963         return ret;
4964 }
4965
4966 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4967                                  bool en_mc_pmc, bool en_bc_pmc)
4968 {
4969         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4970                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
4971 }
4972
4973 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4974                                   bool en_mc_pmc)
4975 {
4976         struct hclge_vport *vport = hclge_get_vport(handle);
4977         struct hclge_dev *hdev = vport->back;
4978         bool en_bc_pmc = true;
4979
4980         /* For devices whose version is below V2, if broadcast promisc is
4981          * enabled, the vlan filter is always bypassed. So broadcast promisc
4982          * should be disabled until the user enables promisc mode.
4983          */
4984         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4985                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4986
4987         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4988                                             en_bc_pmc);
4989 }
4990
4991 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4992 {
4993         struct hclge_vport *vport = hclge_get_vport(handle);
4994         struct hclge_dev *hdev = vport->back;
4995
4996         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4997 }
4998
4999 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5000 {
5001         struct hclge_get_fd_mode_cmd *req;
5002         struct hclge_desc desc;
5003         int ret;
5004
5005         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5006
5007         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5008
5009         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5010         if (ret) {
5011                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5012                 return ret;
5013         }
5014
5015         *fd_mode = req->mode;
5016
5017         return ret;
5018 }
5019
5020 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5021                                    u32 *stage1_entry_num,
5022                                    u32 *stage2_entry_num,
5023                                    u16 *stage1_counter_num,
5024                                    u16 *stage2_counter_num)
5025 {
5026         struct hclge_get_fd_allocation_cmd *req;
5027         struct hclge_desc desc;
5028         int ret;
5029
5030         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5031
5032         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5033
5034         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5035         if (ret) {
5036                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5037                         ret);
5038                 return ret;
5039         }
5040
5041         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5042         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5043         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5044         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5045
5046         return ret;
5047 }
5048
5049 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5050                                    enum HCLGE_FD_STAGE stage_num)
5051 {
5052         struct hclge_set_fd_key_config_cmd *req;
5053         struct hclge_fd_key_cfg *stage;
5054         struct hclge_desc desc;
5055         int ret;
5056
5057         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5058
5059         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5060         stage = &hdev->fd_cfg.key_cfg[stage_num];
5061         req->stage = stage_num;
5062         req->key_select = stage->key_sel;
5063         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5064         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5065         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5066         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5067         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5068         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5069
5070         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5071         if (ret)
5072                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5073
5074         return ret;
5075 }
5076
5077 static int hclge_init_fd_config(struct hclge_dev *hdev)
5078 {
5079 #define LOW_2_WORDS             0x03
5080         struct hclge_fd_key_cfg *key_cfg;
5081         int ret;
5082
5083         if (!hnae3_dev_fd_supported(hdev))
5084                 return 0;
5085
5086         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5087         if (ret)
5088                 return ret;
5089
5090         switch (hdev->fd_cfg.fd_mode) {
5091         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5092                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5093                 break;
5094         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5095                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5096                 break;
5097         default:
5098                 dev_err(&hdev->pdev->dev,
5099                         "Unsupported flow director mode %u\n",
5100                         hdev->fd_cfg.fd_mode);
5101                 return -EOPNOTSUPP;
5102         }
5103
5104         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5105         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5106         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5107         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5108         key_cfg->outer_sipv6_word_en = 0;
5109         key_cfg->outer_dipv6_word_en = 0;
5110
5111         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5112                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5113                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5114                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5115
5116         /* If the max 400-bit key is used, tuples for ether type can be supported */
5117         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5118                 key_cfg->tuple_active |=
5119                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5120
5121         /* roce_type is used to filter RoCE frames,
5122          * dst_vport is used to specify the rule
5123          */
5124         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5125
5126         ret = hclge_get_fd_allocation(hdev,
5127                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5128                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5129                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5130                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5131         if (ret)
5132                 return ret;
5133
5134         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5135 }
5136
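/* Write one flow director TCAM entry. The key is spread over three chained
 * descriptors; sel_x selects whether the X or the Y half of the X-Y encoded
 * ternary key is written, and is_add controls the entry valid bit.
 */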
5137 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5138                                 int loc, u8 *key, bool is_add)
5139 {
5140         struct hclge_fd_tcam_config_1_cmd *req1;
5141         struct hclge_fd_tcam_config_2_cmd *req2;
5142         struct hclge_fd_tcam_config_3_cmd *req3;
5143         struct hclge_desc desc[3];
5144         int ret;
5145
5146         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5147         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5148         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5149         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5150         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5151
5152         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5153         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5154         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5155
5156         req1->stage = stage;
5157         req1->xy_sel = sel_x ? 1 : 0;
5158         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5159         req1->index = cpu_to_le32(loc);
5160         req1->entry_vld = sel_x ? is_add : 0;
5161
5162         if (key) {
5163                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5164                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5165                        sizeof(req2->tcam_data));
5166                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5167                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5168         }
5169
5170         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5171         if (ret)
5172                 dev_err(&hdev->pdev->dev,
5173                         "config tcam key fail, ret=%d\n",
5174                         ret);
5175
5176         return ret;
5177 }
5178
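/* Build the 64-bit action data of a rule: the rule-id write-back and TC
 * override fields are assembled first and shifted into the upper 32 bits,
 * then the drop/forward, queue, counter and next-stage fields fill the
 * lower half.
 */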
5179 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5180                               struct hclge_fd_ad_data *action)
5181 {
5182         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5183         struct hclge_fd_ad_config_cmd *req;
5184         struct hclge_desc desc;
5185         u64 ad_data = 0;
5186         int ret;
5187
5188         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5189
5190         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5191         req->index = cpu_to_le32(loc);
5192         req->stage = stage;
5193
5194         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5195                       action->write_rule_id_to_bd);
5196         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5197                         action->rule_id);
5198         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5199                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5200                               action->override_tc);
5201                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5202                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5203         }
5204         ad_data <<= 32;
5205         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5206         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5207                       action->forward_to_direct_queue);
5208         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5209                         action->queue_id);
5210         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5211         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5212                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5213         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5214         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5215                         action->counter_id);
5216
5217         req->ad_data = cpu_to_le64(ad_data);
5218         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5219         if (ret)
5220                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5221
5222         return ret;
5223 }
5224
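/* Convert one tuple of the rule into its TCAM representation. calc_x() and
 * calc_y() derive the two halves of the ternary match from each value/mask
 * pair. An unused tuple returns true without touching the key, so the
 * caller still advances past its zeroed key space; an inactive tuple
 * returns false and consumes no key space.
 */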
5225 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5226                                    struct hclge_fd_rule *rule)
5227 {
5228         u16 tmp_x_s, tmp_y_s;
5229         u32 tmp_x_l, tmp_y_l;
5230         int i;
5231
5232         if (rule->unused_tuple & tuple_bit)
5233                 return true;
5234
5235         switch (tuple_bit) {
5236         case BIT(INNER_DST_MAC):
5237                 for (i = 0; i < ETH_ALEN; i++) {
5238                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5239                                rule->tuples_mask.dst_mac[i]);
5240                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5241                                rule->tuples_mask.dst_mac[i]);
5242                 }
5243
5244                 return true;
5245         case BIT(INNER_SRC_MAC):
5246                 for (i = 0; i < ETH_ALEN; i++) {
5247                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5248                                rule->tuples_mask.src_mac[i]);
5249                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5250                                rule->tuples_mask.src_mac[i]);
5251                 }
5252
5253                 return true;
5254         case BIT(INNER_VLAN_TAG_FST):
5255                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5256                        rule->tuples_mask.vlan_tag1);
5257                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5258                        rule->tuples_mask.vlan_tag1);
5259                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5260                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5261
5262                 return true;
5263         case BIT(INNER_ETH_TYPE):
5264                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5265                        rule->tuples_mask.ether_proto);
5266                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5267                        rule->tuples_mask.ether_proto);
5268                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5269                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5270
5271                 return true;
5272         case BIT(INNER_IP_TOS):
5273                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5274                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5275
5276                 return true;
5277         case BIT(INNER_IP_PROTO):
5278                 calc_x(*key_x, rule->tuples.ip_proto,
5279                        rule->tuples_mask.ip_proto);
5280                 calc_y(*key_y, rule->tuples.ip_proto,
5281                        rule->tuples_mask.ip_proto);
5282
5283                 return true;
5284         case BIT(INNER_SRC_IP):
5285                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5286                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5287                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5288                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5289                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5290                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5291
5292                 return true;
5293         case BIT(INNER_DST_IP):
5294                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5295                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5296                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5297                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5298                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5299                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5300
5301                 return true;
5302         case BIT(INNER_SRC_PORT):
5303                 calc_x(tmp_x_s, rule->tuples.src_port,
5304                        rule->tuples_mask.src_port);
5305                 calc_y(tmp_y_s, rule->tuples.src_port,
5306                        rule->tuples_mask.src_port);
5307                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5308                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5309
5310                 return true;
5311         case BIT(INNER_DST_PORT):
5312                 calc_x(tmp_x_s, rule->tuples.dst_port,
5313                        rule->tuples_mask.dst_port);
5314                 calc_y(tmp_y_s, rule->tuples.dst_port,
5315                        rule->tuples_mask.dst_port);
5316                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5317                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5318
5319                 return true;
5320         default:
5321                 return false;
5322         }
5323 }
5324
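/* Encode a port identifier for the meta data key: host ports carry the
 * pf_id/vf_id pair, network ports carry the physical port id, and the port
 * type bit distinguishes the two.
 */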
5325 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5326                                  u8 vf_id, u8 network_port_id)
5327 {
5328         u32 port_number = 0;
5329
5330         if (port_type == HOST_PORT) {
5331                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5332                                 pf_id);
5333                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5334                                 vf_id);
5335                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5336         } else {
5337                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5338                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5339                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5340         }
5341
5342         return port_number;
5343 }
5344
5345 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5346                                        __le32 *key_x, __le32 *key_y,
5347                                        struct hclge_fd_rule *rule)
5348 {
5349         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5350         u8 cur_pos = 0, tuple_size, shift_bits;
5351         unsigned int i;
5352
5353         for (i = 0; i < MAX_META_DATA; i++) {
5354                 tuple_size = meta_data_key_info[i].key_length;
5355                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5356
5357                 switch (tuple_bit) {
5358                 case BIT(ROCE_TYPE):
5359                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5360                         cur_pos += tuple_size;
5361                         break;
5362                 case BIT(DST_VPORT):
5363                         port_number = hclge_get_port_number(HOST_PORT, 0,
5364                                                             rule->vf_id, 0);
5365                         hnae3_set_field(meta_data,
5366                                         GENMASK(cur_pos + tuple_size, cur_pos),
5367                                         cur_pos, port_number);
5368                         cur_pos += tuple_size;
5369                         break;
5370                 default:
5371                         break;
5372                 }
5373         }
5374
5375         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5376         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5377         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5378
5379         *key_x = cpu_to_le32(tmp_x << shift_bits);
5380         *key_y = cpu_to_le32(tmp_y << shift_bits);
5381 }
5382
5383 /* A complete key consists of a meta data key and a tuple key.
5384  * The meta data key is stored in the MSB region, the tuple key in the
5385  * LSB region, and unused bits are filled with 0.
5386  */
5387 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5388                             struct hclge_fd_rule *rule)
5389 {
5390         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5391         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5392         u8 *cur_key_x, *cur_key_y;
5393         u8 meta_data_region;
5394         u8 tuple_size;
5395         int ret;
5396         u32 i;
5397
5398         memset(key_x, 0, sizeof(key_x));
5399         memset(key_y, 0, sizeof(key_y));
5400         cur_key_x = key_x;
5401         cur_key_y = key_y;
5402
5403         for (i = 0; i < MAX_TUPLE; i++) {
5404                 bool tuple_valid;
5405                 u32 check_tuple;
5406
5407                 tuple_size = tuple_key_info[i].key_length / 8;
5408                 check_tuple = key_cfg->tuple_active & BIT(i);
5409
5410                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5411                                                      cur_key_y, rule);
5412                 if (tuple_valid) {
5413                         cur_key_x += tuple_size;
5414                         cur_key_y += tuple_size;
5415                 }
5416         }
5417
5418         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5419                         MAX_META_DATA_LENGTH / 8;
5420
5421         hclge_fd_convert_meta_data(key_cfg,
5422                                    (__le32 *)(key_x + meta_data_region),
5423                                    (__le32 *)(key_y + meta_data_region),
5424                                    rule);
5425
5426         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5427                                    true);
5428         if (ret) {
5429                 dev_err(&hdev->pdev->dev,
5430                         "fd key_y config fail, loc=%u, ret=%d\n",
5431                         rule->location, ret);
5432                 return ret;
5433         }
5434
5435         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5436                                    true);
5437         if (ret)
5438                 dev_err(&hdev->pdev->dev,
5439                         "fd key_x config fail, loc=%u, ret=%d\n",
5440                         rule->location, ret);
5441         return ret;
5442 }
5443
5444 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5445                                struct hclge_fd_rule *rule)
5446 {
5447         struct hclge_vport *vport = hdev->vport;
5448         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5449         struct hclge_fd_ad_data ad_data;
5450
5451         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5452         ad_data.ad_id = rule->location;
5453
5454         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5455                 ad_data.drop_packet = true;
5456         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5457                 ad_data.override_tc = true;
5458                 ad_data.queue_id =
5459                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5460                 ad_data.tc_size =
5461                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5462         } else {
5463                 ad_data.forward_to_direct_queue = true;
5464                 ad_data.queue_id = rule->queue_id;
5465         }
5466
5467         ad_data.use_counter = false;
5468         ad_data.counter_id = 0;
5469
5470         ad_data.use_next_stage = false;
5471         ad_data.next_input_key = 0;
5472
5473         ad_data.write_rule_id_to_bd = true;
5474         ad_data.rule_id = rule->location;
5475
5476         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5477 }
5478
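/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec:
 * fields the user left at zero are recorded in *unused_tuple so they are
 * masked out of the TCAM key later, and fields the hardware cannot match on
 * are rejected.
 */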
5479 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5480                                        u32 *unused_tuple)
5481 {
5482         if (!spec || !unused_tuple)
5483                 return -EINVAL;
5484
5485         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5486
5487         if (!spec->ip4src)
5488                 *unused_tuple |= BIT(INNER_SRC_IP);
5489
5490         if (!spec->ip4dst)
5491                 *unused_tuple |= BIT(INNER_DST_IP);
5492
5493         if (!spec->psrc)
5494                 *unused_tuple |= BIT(INNER_SRC_PORT);
5495
5496         if (!spec->pdst)
5497                 *unused_tuple |= BIT(INNER_DST_PORT);
5498
5499         if (!spec->tos)
5500                 *unused_tuple |= BIT(INNER_IP_TOS);
5501
5502         return 0;
5503 }
5504
5505 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5506                                     u32 *unused_tuple)
5507 {
5508         if (!spec || !unused_tuple)
5509                 return -EINVAL;
5510
5511         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5512                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5513
5514         if (!spec->ip4src)
5515                 *unused_tuple |= BIT(INNER_SRC_IP);
5516
5517         if (!spec->ip4dst)
5518                 *unused_tuple |= BIT(INNER_DST_IP);
5519
5520         if (!spec->tos)
5521                 *unused_tuple |= BIT(INNER_IP_TOS);
5522
5523         if (!spec->proto)
5524                 *unused_tuple |= BIT(INNER_IP_PROTO);
5525
5526         if (spec->l4_4_bytes)
5527                 return -EOPNOTSUPP;
5528
5529         if (spec->ip_ver != ETH_RX_NFC_IP4)
5530                 return -EOPNOTSUPP;
5531
5532         return 0;
5533 }
5534
5535 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5536                                        u32 *unused_tuple)
5537 {
5538         if (!spec || !unused_tuple)
5539                 return -EINVAL;
5540
5541         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5542                 BIT(INNER_IP_TOS);
5543
5544         /* check whether the src/dst ip addresses are used */
5545         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5546                 *unused_tuple |= BIT(INNER_SRC_IP);
5547
5548         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5549                 *unused_tuple |= BIT(INNER_DST_IP);
5550
5551         if (!spec->psrc)
5552                 *unused_tuple |= BIT(INNER_SRC_PORT);
5553
5554         if (!spec->pdst)
5555                 *unused_tuple |= BIT(INNER_DST_PORT);
5556
5557         if (spec->tclass)
5558                 return -EOPNOTSUPP;
5559
5560         return 0;
5561 }
5562
5563 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5564                                     u32 *unused_tuple)
5565 {
5566         if (!spec || !unused_tuple)
5567                 return -EINVAL;
5568
5569         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5570                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5571
5572         /* check whether the src/dst ip addresses are used */
5573         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5574                 *unused_tuple |= BIT(INNER_SRC_IP);
5575
5576         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5577                 *unused_tuple |= BIT(INNER_DST_IP);
5578
5579         if (!spec->l4_proto)
5580                 *unused_tuple |= BIT(INNER_IP_PROTO);
5581
5582         if (spec->tclass)
5583                 return -EOPNOTSUPP;
5584
5585         if (spec->l4_4_bytes)
5586                 return -EOPNOTSUPP;
5587
5588         return 0;
5589 }
5590
5591 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5592 {
5593         if (!spec || !unused_tuple)
5594                 return -EINVAL;
5595
5596         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5597                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5598                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5599
5600         if (is_zero_ether_addr(spec->h_source))
5601                 *unused_tuple |= BIT(INNER_SRC_MAC);
5602
5603         if (is_zero_ether_addr(spec->h_dest))
5604                 *unused_tuple |= BIT(INNER_DST_MAC);
5605
5606         if (!spec->h_proto)
5607                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5608
5609         return 0;
5610 }
5611
5612 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5613                                     struct ethtool_rx_flow_spec *fs,
5614                                     u32 *unused_tuple)
5615 {
5616         if (fs->flow_type & FLOW_EXT) {
5617                 if (fs->h_ext.vlan_etype) {
5618                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5619                         return -EOPNOTSUPP;
5620                 }
5621
5622                 if (!fs->h_ext.vlan_tci)
5623                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5624
5625                 if (fs->m_ext.vlan_tci &&
5626                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5627                         dev_err(&hdev->pdev->dev,
5628                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5629                                 be16_to_cpu(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5630                         return -EINVAL;
5631                 }
5632         } else {
5633                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5634         }
5635
5636         if (fs->flow_type & FLOW_MAC_EXT) {
5637                 if (hdev->fd_cfg.fd_mode !=
5638                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5639                         dev_err(&hdev->pdev->dev,
5640                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5641                         return -EOPNOTSUPP;
5642                 }
5643
5644                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5645                         *unused_tuple |= BIT(INNER_DST_MAC);
5646                 else
5647                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5648         }
5649
5650         return 0;
5651 }
5652
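/* Validate an ethtool flow spec before it is converted into a rule.
 * An illustrative command (device name hypothetical) that exercises
 * this path:
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 \
 *           action 3 loc 1
 *
 * The location must fit within stage 1, user-def bytes are rejected,
 * and the per-flow-type helpers record which tuples are unused.
 */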
5653 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5654                                struct ethtool_rx_flow_spec *fs,
5655                                u32 *unused_tuple)
5656 {
5657         u32 flow_type;
5658         int ret;
5659
5660         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5661                 dev_err(&hdev->pdev->dev,
5662                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5663                         fs->location,
5664                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5665                 return -EINVAL;
5666         }
5667
5668         if ((fs->flow_type & FLOW_EXT) &&
5669             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5670                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5671                 return -EOPNOTSUPP;
5672         }
5673
5674         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5675         switch (flow_type) {
5676         case SCTP_V4_FLOW:
5677         case TCP_V4_FLOW:
5678         case UDP_V4_FLOW:
5679                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5680                                                   unused_tuple);
5681                 break;
5682         case IP_USER_FLOW:
5683                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5684                                                unused_tuple);
5685                 break;
5686         case SCTP_V6_FLOW:
5687         case TCP_V6_FLOW:
5688         case UDP_V6_FLOW:
5689                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5690                                                   unused_tuple);
5691                 break;
5692         case IPV6_USER_FLOW:
5693                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5694                                                unused_tuple);
5695                 break;
5696         case ETHER_FLOW:
5697                 if (hdev->fd_cfg.fd_mode !=
5698                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5699                         dev_err(&hdev->pdev->dev,
5700                                 "ETHER_FLOW is not supported in current fd mode!\n");
5701                         return -EOPNOTSUPP;
5702                 }
5703
5704                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5705                                                  unused_tuple);
5706                 break;
5707         default:
5708                 dev_err(&hdev->pdev->dev,
5709                         "unsupported protocol type, protocol type = %#x\n",
5710                         flow_type);
5711                 return -EOPNOTSUPP;
5712         }
5713
5714         if (ret) {
5715                 dev_err(&hdev->pdev->dev,
5716                         "failed to check flow union tuple, ret = %d\n",
5717                         ret);
5718                 return ret;
5719         }
5720
5721         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5722 }
5723
5724 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5725 {
5726         struct hclge_fd_rule *rule = NULL;
5727         struct hlist_node *node2;
5728
5729         spin_lock_bh(&hdev->fd_rule_lock);
5730         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5731                 if (rule->location >= location)
5732                         break;
5733         }
5734
5735         spin_unlock_bh(&hdev->fd_rule_lock);
5736
5737         return rule && rule->location == location;
5738 }
5739
5740 /* the caller must hold fd_rule_lock before calling this function */
5741 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5742                                      struct hclge_fd_rule *new_rule,
5743                                      u16 location,
5744                                      bool is_add)
5745 {
5746         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5747         struct hlist_node *node2;
5748
5749         if (is_add && !new_rule)
5750                 return -EINVAL;
5751
5752         hlist_for_each_entry_safe(rule, node2,
5753                                   &hdev->fd_rule_list, rule_node) {
5754                 if (rule->location >= location)
5755                         break;
5756                 parent = rule;
5757         }
5758
5759         if (rule && rule->location == location) {
5760                 hlist_del(&rule->rule_node);
5761                 kfree(rule);
5762                 hdev->hclge_fd_rule_num--;
5763
5764                 if (!is_add) {
5765                         if (!hdev->hclge_fd_rule_num)
5766                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5767                         clear_bit(location, hdev->fd_bmap);
5768
5769                         return 0;
5770                 }
5771         } else if (!is_add) {
5772                 dev_err(&hdev->pdev->dev,
5773                         "failed to delete, rule %u does not exist\n",
5774                         location);
5775                 return -EINVAL;
5776         }
5777
5778         INIT_HLIST_NODE(&new_rule->rule_node);
5779
5780         if (parent)
5781                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5782         else
5783                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5784
5785         set_bit(location, hdev->fd_bmap);
5786         hdev->hclge_fd_rule_num++;
5787         hdev->fd_active_type = new_rule->rule_type;
5788
5789         return 0;
5790 }
5791
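/* Convert the ethtool flow spec into the rule's internal tuples. All
 * multi-byte fields are stored in host byte order, and the L4 protocol
 * is derived from the flow type for SCTP/TCP/UDP flows.
 */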
5792 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5793                               struct ethtool_rx_flow_spec *fs,
5794                               struct hclge_fd_rule *rule)
5795 {
5796         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5797
5798         switch (flow_type) {
5799         case SCTP_V4_FLOW:
5800         case TCP_V4_FLOW:
5801         case UDP_V4_FLOW:
5802                 rule->tuples.src_ip[IPV4_INDEX] =
5803                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5804                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5805                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5806
5807                 rule->tuples.dst_ip[IPV4_INDEX] =
5808                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5809                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5810                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5811
5812                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5813                 rule->tuples_mask.src_port =
5814                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5815
5816                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5817                 rule->tuples_mask.dst_port =
5818                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5819
5820                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5821                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5822
5823                 rule->tuples.ether_proto = ETH_P_IP;
5824                 rule->tuples_mask.ether_proto = 0xFFFF;
5825
5826                 break;
5827         case IP_USER_FLOW:
5828                 rule->tuples.src_ip[IPV4_INDEX] =
5829                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5830                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5831                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5832
5833                 rule->tuples.dst_ip[IPV4_INDEX] =
5834                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5835                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5836                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5837
5838                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5839                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5840
5841                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5842                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5843
5844                 rule->tuples.ether_proto = ETH_P_IP;
5845                 rule->tuples_mask.ether_proto = 0xFFFF;
5846
5847                 break;
5848         case SCTP_V6_FLOW:
5849         case TCP_V6_FLOW:
5850         case UDP_V6_FLOW:
5851                 be32_to_cpu_array(rule->tuples.src_ip,
5852                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5853                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5854                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5855
5856                 be32_to_cpu_array(rule->tuples.dst_ip,
5857                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5858                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5859                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5860
5861                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5862                 rule->tuples_mask.src_port =
5863                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5864
5865                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5866                 rule->tuples_mask.dst_port =
5867                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5868
5869                 rule->tuples.ether_proto = ETH_P_IPV6;
5870                 rule->tuples_mask.ether_proto = 0xFFFF;
5871
5872                 break;
5873         case IPV6_USER_FLOW:
5874                 be32_to_cpu_array(rule->tuples.src_ip,
5875                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5876                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5877                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5878
5879                 be32_to_cpu_array(rule->tuples.dst_ip,
5880                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5881                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5882                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5883
5884                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5885                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5886
5887                 rule->tuples.ether_proto = ETH_P_IPV6;
5888                 rule->tuples_mask.ether_proto = 0xFFFF;
5889
5890                 break;
5891         case ETHER_FLOW:
5892                 ether_addr_copy(rule->tuples.src_mac,
5893                                 fs->h_u.ether_spec.h_source);
5894                 ether_addr_copy(rule->tuples_mask.src_mac,
5895                                 fs->m_u.ether_spec.h_source);
5896
5897                 ether_addr_copy(rule->tuples.dst_mac,
5898                                 fs->h_u.ether_spec.h_dest);
5899                 ether_addr_copy(rule->tuples_mask.dst_mac,
5900                                 fs->m_u.ether_spec.h_dest);
5901
5902                 rule->tuples.ether_proto =
5903                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5904                 rule->tuples_mask.ether_proto =
5905                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5906
5907                 break;
5908         default:
5909                 return -EOPNOTSUPP;
5910         }
5911
5912         switch (flow_type) {
5913         case SCTP_V4_FLOW:
5914         case SCTP_V6_FLOW:
5915                 rule->tuples.ip_proto = IPPROTO_SCTP;
5916                 rule->tuples_mask.ip_proto = 0xFF;
5917                 break;
5918         case TCP_V4_FLOW:
5919         case TCP_V6_FLOW:
5920                 rule->tuples.ip_proto = IPPROTO_TCP;
5921                 rule->tuples_mask.ip_proto = 0xFF;
5922                 break;
5923         case UDP_V4_FLOW:
5924         case UDP_V6_FLOW:
5925                 rule->tuples.ip_proto = IPPROTO_UDP;
5926                 rule->tuples_mask.ip_proto = 0xFF;
5927                 break;
5928         default:
5929                 break;
5930         }
5931
5932         if (fs->flow_type & FLOW_EXT) {
5933                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5934                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5935         }
5936
5937         if (fs->flow_type & FLOW_MAC_EXT) {
5938                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5939                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5940         }
5941
5942         return 0;
5943 }
5944
5945 /* the caller must hold fd_rule_lock before calling this function */
5946 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5947                                 struct hclge_fd_rule *rule)
5948 {
5949         int ret;
5950
5951         if (!rule) {
5952                 dev_err(&hdev->pdev->dev,
5953                         "The flow director rule is NULL\n");
5954                 return -EINVAL;
5955         }
5956
5957         /* it never fails here, so there is no need to check the return value */
5958         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5959
5960         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5961         if (ret)
5962                 goto clear_rule;
5963
5964         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5965         if (ret)
5966                 goto clear_rule;
5967
5968         return 0;
5969
5970 clear_rule:
5971         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5972         return ret;
5973 }
5974
5975 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5976 {
5977         struct hclge_vport *vport = hclge_get_vport(handle);
5978         struct hclge_dev *hdev = vport->back;
5979
5980         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5981 }
5982
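/* Add a flow director rule requested via ethtool. The ring_cookie
 * either requests a drop action (RX_CLS_FLOW_DISC) or encodes the
 * destination VF and queue; VF id 0 refers to the PF itself.
 */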
5983 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5984                               struct ethtool_rxnfc *cmd)
5985 {
5986         struct hclge_vport *vport = hclge_get_vport(handle);
5987         struct hclge_dev *hdev = vport->back;
5988         u16 dst_vport_id = 0, q_index = 0;
5989         struct ethtool_rx_flow_spec *fs;
5990         struct hclge_fd_rule *rule;
5991         u32 unused = 0;
5992         u8 action;
5993         int ret;
5994
5995         if (!hnae3_dev_fd_supported(hdev)) {
5996                 dev_err(&hdev->pdev->dev,
5997                         "flow director is not supported\n");
5998                 return -EOPNOTSUPP;
5999         }
6000
6001         if (!hdev->fd_en) {
6002                 dev_err(&hdev->pdev->dev,
6003                         "please enable flow director first\n");
6004                 return -EOPNOTSUPP;
6005         }
6006
6007         if (hclge_is_cls_flower_active(handle)) {
6008                 dev_err(&hdev->pdev->dev,
6009                         "please delete all existing cls flower rules first\n");
6010                 return -EINVAL;
6011         }
6012
6013         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6014
6015         ret = hclge_fd_check_spec(hdev, fs, &unused);
6016         if (ret)
6017                 return ret;
6018
6019         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6020                 action = HCLGE_FD_ACTION_DROP_PACKET;
6021         } else {
6022                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6023                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6024                 u16 tqps;
6025
6026                 if (vf > hdev->num_req_vfs) {
6027                         dev_err(&hdev->pdev->dev,
6028                                 "Error: vf id (%u) > max vf num (%u)\n",
6029                                 vf, hdev->num_req_vfs);
6030                         return -EINVAL;
6031                 }
6032
6033                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6034                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6035
6036                 if (ring >= tqps) {
6037                         dev_err(&hdev->pdev->dev,
6038                                 "Error: queue id (%u) > max queue id (%u)\n",
6039                                 ring, tqps - 1);
6040                         return -EINVAL;
6041                 }
6042
6043                 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6044                 q_index = ring;
6045         }
6046
6047         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6048         if (!rule)
6049                 return -ENOMEM;
6050
6051         ret = hclge_fd_get_tuple(hdev, fs, rule);
6052         if (ret) {
6053                 kfree(rule);
6054                 return ret;
6055         }
6056
6057         rule->flow_type = fs->flow_type;
6058         rule->location = fs->location;
6059         rule->unused_tuple = unused;
6060         rule->vf_id = dst_vport_id;
6061         rule->queue_id = q_index;
6062         rule->action = action;
6063         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6064
6065         /* to avoid rule conflicts, clear all arfs rules when the user
6066          * configures a rule via ethtool
6067          */
6068         spin_lock_bh(&hdev->fd_rule_lock);
6069         hclge_clear_arfs_rules(handle);
6070
6071         ret = hclge_fd_config_rule(hdev, rule);
6072
6073         spin_unlock_bh(&hdev->fd_rule_lock);
6074
6075         return ret;
6076 }
6077
6078 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6079                               struct ethtool_rxnfc *cmd)
6080 {
6081         struct hclge_vport *vport = hclge_get_vport(handle);
6082         struct hclge_dev *hdev = vport->back;
6083         struct ethtool_rx_flow_spec *fs;
6084         int ret;
6085
6086         if (!hnae3_dev_fd_supported(hdev))
6087                 return -EOPNOTSUPP;
6088
6089         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6090
6091         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6092                 return -EINVAL;
6093
6094         if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6095             !hclge_fd_rule_exist(hdev, fs->location)) {
6096                 dev_err(&hdev->pdev->dev,
6097                         "failed to delete, rule %u does not exist\n", fs->location);
6098                 return -ENOENT;
6099         }
6100
6101         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6102                                    NULL, false);
6103         if (ret)
6104                 return ret;
6105
6106         spin_lock_bh(&hdev->fd_rule_lock);
6107         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6108
6109         spin_unlock_bh(&hdev->fd_rule_lock);
6110
6111         return ret;
6112 }
6113
6114 /* the caller must hold fd_rule_lock before calling this function */
6115 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6116                                      bool clear_list)
6117 {
6118         struct hclge_vport *vport = hclge_get_vport(handle);
6119         struct hclge_dev *hdev = vport->back;
6120         struct hclge_fd_rule *rule;
6121         struct hlist_node *node;
6122         u16 location;
6123
6124         if (!hnae3_dev_fd_supported(hdev))
6125                 return;
6126
6127         for_each_set_bit(location, hdev->fd_bmap,
6128                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6129                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6130                                      NULL, false);
6131
6132         if (clear_list) {
6133                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6134                                           rule_node) {
6135                         hlist_del(&rule->rule_node);
6136                         kfree(rule);
6137                 }
6138                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6139                 hdev->hclge_fd_rule_num = 0;
6140                 bitmap_zero(hdev->fd_bmap,
6141                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6142         }
6143 }
6144
6145 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6146 {
6147         struct hclge_vport *vport = hclge_get_vport(handle);
6148         struct hclge_dev *hdev = vport->back;
6149         struct hclge_fd_rule *rule;
6150         struct hlist_node *node;
6151         int ret;
6152
6153         /* Return 0 here, because the reset error handling checks this
6154          * return value; returning an error here would make the whole
6155          * reset process fail.
6156          */
6157         if (!hnae3_dev_fd_supported(hdev))
6158                 return 0;
6159
6160         /* if fd is disabled, the rules should not be restored during reset */
6161         if (!hdev->fd_en)
6162                 return 0;
6163
6164         spin_lock_bh(&hdev->fd_rule_lock);
6165         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6166                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6167                 if (!ret)
6168                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6169
6170                 if (ret) {
6171                         dev_warn(&hdev->pdev->dev,
6172                                  "failed to restore rule %u, removing it\n",
6173                                  rule->location);
6174                         clear_bit(rule->location, hdev->fd_bmap);
6175                         hlist_del(&rule->rule_node);
6176                         kfree(rule);
6177                         hdev->hclge_fd_rule_num--;
6178                 }
6179         }
6180
6181         if (hdev->hclge_fd_rule_num)
6182                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6183
6184         spin_unlock_bh(&hdev->fd_rule_lock);
6185
6186         return 0;
6187 }
6188
6189 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6190                                  struct ethtool_rxnfc *cmd)
6191 {
6192         struct hclge_vport *vport = hclge_get_vport(handle);
6193         struct hclge_dev *hdev = vport->back;
6194
6195         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6196                 return -EOPNOTSUPP;
6197
6198         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6199         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6200
6201         return 0;
6202 }
6203
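/* The hclge_fd_get_*_info helpers below translate an internal rule
 * back into an ethtool spec, masking out any tuple that is flagged
 * as unused in the rule.
 */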
6204 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6205                                      struct ethtool_tcpip4_spec *spec,
6206                                      struct ethtool_tcpip4_spec *spec_mask)
6207 {
6208         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6209         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6210                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6211
6212         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6213         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6214                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6215
6216         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6217         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6218                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6219
6220         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6221         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6222                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6223
6224         spec->tos = rule->tuples.ip_tos;
6225         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6226                         0 : rule->tuples_mask.ip_tos;
6227 }
6228
6229 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6230                                   struct ethtool_usrip4_spec *spec,
6231                                   struct ethtool_usrip4_spec *spec_mask)
6232 {
6233         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6234         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6235                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6236
6237         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6238         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6239                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6240
6241         spec->tos = rule->tuples.ip_tos;
6242         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6243                         0 : rule->tuples_mask.ip_tos;
6244
6245         spec->proto = rule->tuples.ip_proto;
6246         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6247                         0 : rule->tuples_mask.ip_proto;
6248
6249         spec->ip_ver = ETH_RX_NFC_IP4;
6250 }
6251
6252 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6253                                      struct ethtool_tcpip6_spec *spec,
6254                                      struct ethtool_tcpip6_spec *spec_mask)
6255 {
6256         cpu_to_be32_array(spec->ip6src,
6257                           rule->tuples.src_ip, IPV6_SIZE);
6258         cpu_to_be32_array(spec->ip6dst,
6259                           rule->tuples.dst_ip, IPV6_SIZE);
6260         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6261                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6262         else
6263                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6264                                   IPV6_SIZE);
6265
6266         if (rule->unused_tuple & BIT(INNER_DST_IP))
6267                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6268         else
6269                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6270                                   IPV6_SIZE);
6271
6272         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6273         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6274                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6275
6276         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6277         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6278                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6279 }
6280
6281 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6282                                   struct ethtool_usrip6_spec *spec,
6283                                   struct ethtool_usrip6_spec *spec_mask)
6284 {
6285         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6286         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6287         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6288                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6289         else
6290                 cpu_to_be32_array(spec_mask->ip6src,
6291                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6292
6293         if (rule->unused_tuple & BIT(INNER_DST_IP))
6294                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6295         else
6296                 cpu_to_be32_array(spec_mask->ip6dst,
6297                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6298
6299         spec->l4_proto = rule->tuples.ip_proto;
6300         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6301                         0 : rule->tuples_mask.ip_proto;
6302 }
6303
6304 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6305                                     struct ethhdr *spec,
6306                                     struct ethhdr *spec_mask)
6307 {
6308         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6309         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6310
6311         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6312                 eth_zero_addr(spec_mask->h_source);
6313         else
6314                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6315
6316         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6317                 eth_zero_addr(spec_mask->h_dest);
6318         else
6319                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6320
6321         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6322         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6323                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6324 }
6325
6326 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6327                                   struct hclge_fd_rule *rule)
6328 {
6329         if (fs->flow_type & FLOW_EXT) {
6330                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6331                 fs->m_ext.vlan_tci =
6332                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6333                                 cpu_to_be16(VLAN_VID_MASK) :
6334                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6335         }
6336
6337         if (fs->flow_type & FLOW_MAC_EXT) {
6338                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6339                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6340                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6341                 else
6342                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6343                                         rule->tuples_mask.dst_mac);
6344         }
6345 }
6346
6347 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6348                                   struct ethtool_rxnfc *cmd)
6349 {
6350         struct hclge_vport *vport = hclge_get_vport(handle);
6351         struct hclge_fd_rule *rule = NULL;
6352         struct hclge_dev *hdev = vport->back;
6353         struct ethtool_rx_flow_spec *fs;
6354         struct hlist_node *node2;
6355
6356         if (!hnae3_dev_fd_supported(hdev))
6357                 return -EOPNOTSUPP;
6358
6359         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6360
6361         spin_lock_bh(&hdev->fd_rule_lock);
6362
6363         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6364                 if (rule->location >= fs->location)
6365                         break;
6366         }
6367
6368         if (!rule || fs->location != rule->location) {
6369                 spin_unlock_bh(&hdev->fd_rule_lock);
6370
6371                 return -ENOENT;
6372         }
6373
6374         fs->flow_type = rule->flow_type;
6375         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6376         case SCTP_V4_FLOW:
6377         case TCP_V4_FLOW:
6378         case UDP_V4_FLOW:
6379                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6380                                          &fs->m_u.tcp_ip4_spec);
6381                 break;
6382         case IP_USER_FLOW:
6383                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6384                                       &fs->m_u.usr_ip4_spec);
6385                 break;
6386         case SCTP_V6_FLOW:
6387         case TCP_V6_FLOW:
6388         case UDP_V6_FLOW:
6389                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6390                                          &fs->m_u.tcp_ip6_spec);
6391                 break;
6392         case IPV6_USER_FLOW:
6393                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6394                                       &fs->m_u.usr_ip6_spec);
6395                 break;
6396         /* The flow type of the fd rule was checked before it was added to
6397          * the rule list. As all other flow types have been handled above,
6398          * the default case must be ETHER_FLOW.
6399          */
6400         default:
6401                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6402                                         &fs->m_u.ether_spec);
6403                 break;
6404         }
6405
6406         hclge_fd_get_ext_info(fs, rule);
6407
6408         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6409                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6410         } else {
6411                 u64 vf_id;
6412
6413                 fs->ring_cookie = rule->queue_id;
6414                 vf_id = rule->vf_id;
6415                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6416                 fs->ring_cookie |= vf_id;
6417         }
6418
6419         spin_unlock_bh(&hdev->fd_rule_lock);
6420
6421         return 0;
6422 }
6423
6424 static int hclge_get_all_rules(struct hnae3_handle *handle,
6425                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6426 {
6427         struct hclge_vport *vport = hclge_get_vport(handle);
6428         struct hclge_dev *hdev = vport->back;
6429         struct hclge_fd_rule *rule;
6430         struct hlist_node *node2;
6431         int cnt = 0;
6432
6433         if (!hnae3_dev_fd_supported(hdev))
6434                 return -EOPNOTSUPP;
6435
6436         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6437
6438         spin_lock_bh(&hdev->fd_rule_lock);
6439         hlist_for_each_entry_safe(rule, node2,
6440                                   &hdev->fd_rule_list, rule_node) {
6441                 if (cnt == cmd->rule_cnt) {
6442                         spin_unlock_bh(&hdev->fd_rule_lock);
6443                         return -EMSGSIZE;
6444                 }
6445
6446                 rule_locs[cnt] = rule->location;
6447                 cnt++;
6448         }
6449
6450         spin_unlock_bh(&hdev->fd_rule_lock);
6451
6452         cmd->rule_cnt = cnt;
6453
6454         return 0;
6455 }
6456
6457 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6458                                      struct hclge_fd_rule_tuples *tuples)
6459 {
6460 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6461 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6462
6463         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6464         tuples->ip_proto = fkeys->basic.ip_proto;
6465         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6466
6467         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6468                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6469                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6470         } else {
6471                 int i;
6472
6473                 for (i = 0; i < IPV6_SIZE; i++) {
6474                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6475                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6476                 }
6477         }
6478 }
6479
6480 /* traverse all rules, check whether an existing rule has the same tuples */
6481 static struct hclge_fd_rule *
6482 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6483                           const struct hclge_fd_rule_tuples *tuples)
6484 {
6485         struct hclge_fd_rule *rule = NULL;
6486         struct hlist_node *node;
6487
6488         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6489                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6490                         return rule;
6491         }
6492
6493         return NULL;
6494 }
6495
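/* Build an aRFS rule from the dissected flow keys. Only the ethertype,
 * IP addresses, L4 protocol and destination port are matched; MAC,
 * VLAN, TOS and the source port are marked as unused.
 */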
6496 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6497                                      struct hclge_fd_rule *rule)
6498 {
6499         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6500                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6501                              BIT(INNER_SRC_PORT);
6502         rule->action = 0;
6503         rule->vf_id = 0;
6504         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6505         if (tuples->ether_proto == ETH_P_IP) {
6506                 if (tuples->ip_proto == IPPROTO_TCP)
6507                         rule->flow_type = TCP_V4_FLOW;
6508                 else
6509                         rule->flow_type = UDP_V4_FLOW;
6510         } else {
6511                 if (tuples->ip_proto == IPPROTO_TCP)
6512                         rule->flow_type = TCP_V6_FLOW;
6513                 else
6514                         rule->flow_type = UDP_V6_FLOW;
6515         }
6516         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6517         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6518 }
6519
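/* Backend for aRFS flow steering: returns the rule location on
 * success so it can be used as the flow id, or a negative errno.
 */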
6520 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6521                                       u16 flow_id, struct flow_keys *fkeys)
6522 {
6523         struct hclge_vport *vport = hclge_get_vport(handle);
6524         struct hclge_fd_rule_tuples new_tuples = {};
6525         struct hclge_dev *hdev = vport->back;
6526         struct hclge_fd_rule *rule;
6527         u16 tmp_queue_id;
6528         u16 bit_id;
6529         int ret;
6530
6531         if (!hnae3_dev_fd_supported(hdev))
6532                 return -EOPNOTSUPP;
6533
6534         /* when fd rules added by the user already exist,
6535          * arfs should not work
6536          */
6537         spin_lock_bh(&hdev->fd_rule_lock);
6538         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6539             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6540                 spin_unlock_bh(&hdev->fd_rule_lock);
6541                 return -EOPNOTSUPP;
6542         }
6543
6544         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6545
6546         /* check whether a flow director filter already exists for this
6547          * flow: if not, create a new filter for it; if one exists with a
6548          * different queue id, modify the filter; if one exists with the
6549          * same queue id, do nothing
6550          */
6551         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6552         if (!rule) {
6553                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6554                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6555                         spin_unlock_bh(&hdev->fd_rule_lock);
6556                         return -ENOSPC;
6557                 }
6558
6559                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6560                 if (!rule) {
6561                         spin_unlock_bh(&hdev->fd_rule_lock);
6562                         return -ENOMEM;
6563                 }
6564
6565                 set_bit(bit_id, hdev->fd_bmap);
6566                 rule->location = bit_id;
6567                 rule->arfs.flow_id = flow_id;
6568                 rule->queue_id = queue_id;
6569                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6570                 ret = hclge_fd_config_rule(hdev, rule);
6571
6572                 spin_unlock_bh(&hdev->fd_rule_lock);
6573
6574                 if (ret)
6575                         return ret;
6576
6577                 return rule->location;
6578         }
6579
6580         spin_unlock_bh(&hdev->fd_rule_lock);
6581
6582         if (rule->queue_id == queue_id)
6583                 return rule->location;
6584
6585         tmp_queue_id = rule->queue_id;
6586         rule->queue_id = queue_id;
6587         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6588         if (ret) {
6589                 rule->queue_id = tmp_queue_id;
6590                 return ret;
6591         }
6592
6593         return rule->location;
6594 }
6595
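/* Expire aged-out aRFS filters: expired rules are moved to a private
 * list under fd_rule_lock, then their TCAM entries are cleared after
 * the lock has been dropped, keeping the critical section short.
 */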
6596 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6597 {
6598 #ifdef CONFIG_RFS_ACCEL
6599         struct hnae3_handle *handle = &hdev->vport[0].nic;
6600         struct hclge_fd_rule *rule;
6601         struct hlist_node *node;
6602         HLIST_HEAD(del_list);
6603
6604         spin_lock_bh(&hdev->fd_rule_lock);
6605         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6606                 spin_unlock_bh(&hdev->fd_rule_lock);
6607                 return;
6608         }
6609         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6610                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6611                                         rule->arfs.flow_id, rule->location)) {
6612                         hlist_del_init(&rule->rule_node);
6613                         hlist_add_head(&rule->rule_node, &del_list);
6614                         hdev->hclge_fd_rule_num--;
6615                         clear_bit(rule->location, hdev->fd_bmap);
6616                 }
6617         }
6618         spin_unlock_bh(&hdev->fd_rule_lock);
6619
6620         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6621                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6622                                      rule->location, NULL, false);
6623                 kfree(rule);
6624         }
6625 #endif
6626 }
6627
6628 /* the caller must hold fd_rule_lock before calling this function */
6629 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6630 {
6631 #ifdef CONFIG_RFS_ACCEL
6632         struct hclge_vport *vport = hclge_get_vport(handle);
6633         struct hclge_dev *hdev = vport->back;
6634
6635         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6636                 hclge_del_all_fd_entries(handle, true);
6637 #endif
6638 }
6639
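/* The hclge_get_cls_key_* helpers each copy one dissector key from a
 * tc flower match into the rule tuples; an absent key marks the
 * corresponding tuples as unused. ETH_P_ALL is treated as a wildcard
 * ethertype.
 */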
6640 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6641                                     struct hclge_fd_rule *rule)
6642 {
6643         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6644                 struct flow_match_basic match;
6645                 u16 ethtype_key, ethtype_mask;
6646
6647                 flow_rule_match_basic(flow, &match);
6648                 ethtype_key = ntohs(match.key->n_proto);
6649                 ethtype_mask = ntohs(match.mask->n_proto);
6650
6651                 if (ethtype_key == ETH_P_ALL) {
6652                         ethtype_key = 0;
6653                         ethtype_mask = 0;
6654                 }
6655                 rule->tuples.ether_proto = ethtype_key;
6656                 rule->tuples_mask.ether_proto = ethtype_mask;
6657                 rule->tuples.ip_proto = match.key->ip_proto;
6658                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6659         } else {
6660                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6661                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6662         }
6663 }
6664
6665 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6666                                   struct hclge_fd_rule *rule)
6667 {
6668         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6669                 struct flow_match_eth_addrs match;
6670
6671                 flow_rule_match_eth_addrs(flow, &match);
6672                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6673                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6674                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6675                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6676         } else {
6677                 rule->unused_tuple |= BIT(INNER_DST_MAC);
6678                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6679         }
6680 }
6681
6682 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6683                                    struct hclge_fd_rule *rule)
6684 {
6685         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6686                 struct flow_match_vlan match;
6687
6688                 flow_rule_match_vlan(flow, &match);
6689                 rule->tuples.vlan_tag1 = match.key->vlan_id |
6690                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6691                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6692                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6693         } else {
6694                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6695         }
6696 }
6697
6698 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6699                                  struct hclge_fd_rule *rule)
6700 {
6701         u16 addr_type = 0;
6702
6703         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6704                 struct flow_match_control match;
6705
6706                 flow_rule_match_control(flow, &match);
6707                 addr_type = match.key->addr_type;
6708         }
6709
6710         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6711                 struct flow_match_ipv4_addrs match;
6712
6713                 flow_rule_match_ipv4_addrs(flow, &match);
6714                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6715                 rule->tuples_mask.src_ip[IPV4_INDEX] =
6716                                                 be32_to_cpu(match.mask->src);
6717                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6718                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6719                                                 be32_to_cpu(match.mask->dst);
6720         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6721                 struct flow_match_ipv6_addrs match;
6722
6723                 flow_rule_match_ipv6_addrs(flow, &match);
6724                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6725                                   IPV6_SIZE);
6726                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6727                                   match.mask->src.s6_addr32, IPV6_SIZE);
6728                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6729                                   IPV6_SIZE);
6730                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6731                                   match.mask->dst.s6_addr32, IPV6_SIZE);
6732         } else {
6733                 rule->unused_tuple |= BIT(INNER_SRC_IP);
6734                 rule->unused_tuple |= BIT(INNER_DST_IP);
6735         }
6736 }
6737
6738 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6739                                    struct hclge_fd_rule *rule)
6740 {
6741         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6742                 struct flow_match_ports match;
6743
6744                 flow_rule_match_ports(flow, &match);
6745
6746                 rule->tuples.src_port = be16_to_cpu(match.key->src);
6747                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6748                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6749                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6750         } else {
6751                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6752                 rule->unused_tuple |= BIT(INNER_DST_PORT);
6753         }
6754 }
6755
6756 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6757                                   struct flow_cls_offload *cls_flower,
6758                                   struct hclge_fd_rule *rule)
6759 {
6760         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6761         struct flow_dissector *dissector = flow->match.dissector;
6762
6763         if (dissector->used_keys &
6764             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6765               BIT(FLOW_DISSECTOR_KEY_BASIC) |
6766               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6767               BIT(FLOW_DISSECTOR_KEY_VLAN) |
6768               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6769               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6770               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6771                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6772                         dissector->used_keys);
6773                 return -EOPNOTSUPP;
6774         }
6775
6776         hclge_get_cls_key_basic(flow, rule);
6777         hclge_get_cls_key_mac(flow, rule);
6778         hclge_get_cls_key_vlan(flow, rule);
6779         hclge_get_cls_key_ip(flow, rule);
6780         hclge_get_cls_key_port(flow, rule);
6781
6782         return 0;
6783 }
6784
6785 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6786                                   struct flow_cls_offload *cls_flower, int tc)
6787 {
6788         u32 prio = cls_flower->common.prio;
6789
6790         if (tc < 0 || tc > hdev->tc_max) {
6791                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6792                 return -EINVAL;
6793         }
6794
6795         if (prio == 0 ||
6796             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6797                 dev_err(&hdev->pdev->dev,
6798                         "prio %u should be in range [1, %u]\n",
6799                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6800                 return -EINVAL;
6801         }
6802
6803         if (test_bit(prio - 1, hdev->fd_bmap)) {
6804                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6805                 return -EINVAL;
6806         }
6807         return 0;
6808 }
6809
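/* Offload a tc flower rule into the flow director, using the flower
 * priority, minus one, as the rule location. An illustrative command
 * (device name hypothetical) that reaches this path:
 *
 *   tc filter add dev eth0 ingress protocol ip prio 1 flower \
 *      dst_ip 192.168.1.1 ip_proto tcp skip_sw hw_tc 2
 */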
6810 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6811                                 struct flow_cls_offload *cls_flower,
6812                                 int tc)
6813 {
6814         struct hclge_vport *vport = hclge_get_vport(handle);
6815         struct hclge_dev *hdev = vport->back;
6816         struct hclge_fd_rule *rule;
6817         int ret;
6818
6819         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6820                 dev_err(&hdev->pdev->dev,
6821                         "please remove all existing fd rules via ethtool first\n");
6822                 return -EINVAL;
6823         }
6824
6825         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6826         if (ret) {
6827                 dev_err(&hdev->pdev->dev,
6828                         "failed to check cls flower params, ret = %d\n", ret);
6829                 return ret;
6830         }
6831
6832         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6833         if (!rule)
6834                 return -ENOMEM;
6835
6836         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6837         if (ret)
6838                 goto err;
6839
6840         rule->action = HCLGE_FD_ACTION_SELECT_TC;
6841         rule->cls_flower.tc = tc;
6842         rule->location = cls_flower->common.prio - 1;
6843         rule->vf_id = 0;
6844         rule->cls_flower.cookie = cls_flower->cookie;
6845         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6846
6847         spin_lock_bh(&hdev->fd_rule_lock);
6848         hclge_clear_arfs_rules(handle);
6849
6850         ret = hclge_fd_config_rule(hdev, rule);
6851
6852         spin_unlock_bh(&hdev->fd_rule_lock);
6853
6854         if (ret) {
6855                 dev_err(&hdev->pdev->dev,
6856                         "failed to add cls flower rule, ret = %d\n", ret);
6857                 goto err;
6858         }
6859
6860         return 0;
6861 err:
6862         kfree(rule);
6863         return ret;
6864 }
6865
6866 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6867                                                    unsigned long cookie)
6868 {
6869         struct hclge_fd_rule *rule;
6870         struct hlist_node *node;
6871
6872         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6873                 if (rule->cls_flower.cookie == cookie)
6874                         return rule;
6875         }
6876
6877         return NULL;
6878 }
6879
6880 static int hclge_del_cls_flower(struct hnae3_handle *handle,
6881                                 struct flow_cls_offload *cls_flower)
6882 {
6883         struct hclge_vport *vport = hclge_get_vport(handle);
6884         struct hclge_dev *hdev = vport->back;
6885         struct hclge_fd_rule *rule;
6886         int ret;
6887
6888         spin_lock_bh(&hdev->fd_rule_lock);
6889
6890         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
6891         if (!rule) {
6892                 spin_unlock_bh(&hdev->fd_rule_lock);
6893                 return -EINVAL;
6894         }
6895
6896         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6897                                    NULL, false);
6898         if (ret) {
6899                 dev_err(&hdev->pdev->dev,
6900                         "failed to delete cls flower rule %u, ret = %d\n",
6901                         rule->location, ret);
6902                 spin_unlock_bh(&hdev->fd_rule_lock);
6903                 return ret;
6904         }
6905
6906         ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6907         if (ret) {
6908                 dev_err(&hdev->pdev->dev,
6909                         "failed to delete cls flower rule %u in list, ret = %d\n",
6910                         rule->location, ret);
6911                 spin_unlock_bh(&hdev->fd_rule_lock);
6912                 return ret;
6913         }
6914
6915         spin_unlock_bh(&hdev->fd_rule_lock);
6916
6917         return 0;
6918 }
6919
6920 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6921 {
6922         struct hclge_vport *vport = hclge_get_vport(handle);
6923         struct hclge_dev *hdev = vport->back;
6924
6925         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6926                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6927 }
6928
6929 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6930 {
6931         struct hclge_vport *vport = hclge_get_vport(handle);
6932         struct hclge_dev *hdev = vport->back;
6933
6934         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6935 }
6936
6937 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6938 {
6939         struct hclge_vport *vport = hclge_get_vport(handle);
6940         struct hclge_dev *hdev = vport->back;
6941
6942         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6943 }
6944
6945 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6946 {
6947         struct hclge_vport *vport = hclge_get_vport(handle);
6948         struct hclge_dev *hdev = vport->back;
6949
6950         return hdev->rst_stats.hw_reset_done_cnt;
6951 }
6952
6953 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6954 {
6955         struct hclge_vport *vport = hclge_get_vport(handle);
6956         struct hclge_dev *hdev = vport->back;
6957         bool clear;
6958
6959         hdev->fd_en = enable;
6960         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6961
6962         if (!enable) {
6963                 spin_lock_bh(&hdev->fd_rule_lock);
6964                 hclge_del_all_fd_entries(handle, clear);
6965                 spin_unlock_bh(&hdev->fd_rule_lock);
6966         } else {
6967                 hclge_restore_fd_entries(handle);
6968         }
6969 }
6970
6971 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6972 {
6973         struct hclge_desc desc;
6974         struct hclge_config_mac_mode_cmd *req =
6975                 (struct hclge_config_mac_mode_cmd *)desc.data;
6976         u32 loop_en = 0;
6977         int ret;
6978
6979         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6980
6981         if (enable) {
6982                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6983                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6984                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6985                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6986                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6987                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6988                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6989                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6990                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6991                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6992         }
6993
6994         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6995
6996         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6997         if (ret)
6998                 dev_err(&hdev->pdev->dev,
6999                         "mac enable fail, ret = %d.\n", ret);
7000 }
7001
7002 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7003                                      u8 switch_param, u8 param_mask)
7004 {
7005         struct hclge_mac_vlan_switch_cmd *req;
7006         struct hclge_desc desc;
7007         u32 func_id;
7008         int ret;
7009
7010         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7011         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7012
7013         /* read current config parameter */
7014         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7015                                    true);
7016         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7017         req->func_id = cpu_to_le32(func_id);
7018
7019         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7020         if (ret) {
7021                 dev_err(&hdev->pdev->dev,
7022                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7023                 return ret;
7024         }
7025
7026         /* modify and write new config parameter */
7027         hclge_cmd_reuse_desc(&desc, false);
7028         req->switch_param = (req->switch_param & param_mask) | switch_param;
7029         req->param_mask = param_mask;
7030
7031         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7032         if (ret)
7033                 dev_err(&hdev->pdev->dev,
7034                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7035         return ret;
7036 }
7037
7038 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7039                                        int link_ret)
7040 {
7041 #define HCLGE_PHY_LINK_STATUS_NUM  200
7042
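        /* Poll the phy every HCLGE_LINK_STATUS_MS (10 ms) until it reports
         * the expected link state, giving up after 200 tries (~2 seconds).
         */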
7043         struct phy_device *phydev = hdev->hw.mac.phydev;
7044         int i = 0;
7045         int ret;
7046
7047         do {
7048                 ret = phy_read_status(phydev);
7049                 if (ret) {
7050                         dev_err(&hdev->pdev->dev,
7051                                 "phy update link status fail, ret = %d\n", ret);
7052                         return;
7053                 }
7054
7055                 if (phydev->link == link_ret)
7056                         break;
7057
7058                 msleep(HCLGE_LINK_STATUS_MS);
7059         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7060 }
7061
7062 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7063 {
7064 #define HCLGE_MAC_LINK_STATUS_NUM  100
7065
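        /* Poll the mac every HCLGE_LINK_STATUS_MS (10 ms) until it reports
         * the expected link state, returning -EBUSY after 100 tries
         * (~1 second).
         */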
7066         int link_status;
7067         int i = 0;
7068         int ret;
7069
7070         do {
7071                 ret = hclge_get_mac_link_status(hdev, &link_status);
7072                 if (ret)
7073                         return ret;
7074                 if (link_status == link_ret)
7075                         return 0;
7076
7077                 msleep(HCLGE_LINK_STATUS_MS);
7078         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7079         return -EBUSY;
7080 }
7081
7082 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7083                                           bool is_phy)
7084 {
7085         int link_ret;
7086
7087         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7088
7089         if (is_phy)
7090                 hclge_phy_link_status_wait(hdev, link_ret);
7091
7092         return hclge_mac_link_status_wait(hdev, link_ret);
7093 }
7094
7095 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7096 {
7097         struct hclge_config_mac_mode_cmd *req;
7098         struct hclge_desc desc;
7099         u32 loop_en;
7100         int ret;
7101
7102         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7103         /* 1 Read out the MAC mode config first */
7104         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7105         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7106         if (ret) {
7107                 dev_err(&hdev->pdev->dev,
7108                         "mac loopback get fail, ret = %d.\n", ret);
7109                 return ret;
7110         }
7111
7112         /* 2 Then set up the loopback flag */
7113         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7114         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7115
7116         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7117
7118         /* 3 Config mac work mode with loopback flag
7119          * and its original configuration parameters
7120          */
7121         hclge_cmd_reuse_desc(&desc, false);
7122         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7123         if (ret)
7124                 dev_err(&hdev->pdev->dev,
7125                         "mac loopback set fail, ret = %d.\n", ret);
7126         return ret;
7127 }
7128
7129 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7130                                      enum hnae3_loop loop_mode)
7131 {
7132 #define HCLGE_SERDES_RETRY_MS   10
7133 #define HCLGE_SERDES_RETRY_NUM  100
7134
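        /* The firmware needs time to apply the loopback config: after
         * sending the set command, poll the DONE bit every 10 ms, up to
         * 100 times (~1 second), before timing out.
         */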
7135         struct hclge_serdes_lb_cmd *req;
7136         struct hclge_desc desc;
7137         int ret, i = 0;
7138         u8 loop_mode_b;
7139
7140         req = (struct hclge_serdes_lb_cmd *)desc.data;
7141         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7142
7143         switch (loop_mode) {
7144         case HNAE3_LOOP_SERIAL_SERDES:
7145                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7146                 break;
7147         case HNAE3_LOOP_PARALLEL_SERDES:
7148                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7149                 break;
7150         default:
7151                 dev_err(&hdev->pdev->dev,
7152                         "unsupported serdes loopback mode %d\n", loop_mode);
7153                 return -EOPNOTSUPP;
7154         }
7155
7156         if (en) {
7157                 req->enable = loop_mode_b;
7158                 req->mask = loop_mode_b;
7159         } else {
7160                 req->mask = loop_mode_b;
7161         }
7162
7163         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7164         if (ret) {
7165                 dev_err(&hdev->pdev->dev,
7166                         "serdes loopback set fail, ret = %d\n", ret);
7167                 return ret;
7168         }
7169
7170         do {
7171                 msleep(HCLGE_SERDES_RETRY_MS);
7172                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7173                                            true);
7174                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7175                 if (ret) {
7176                         dev_err(&hdev->pdev->dev,
7177                                 "serdes loopback get fail, ret = %d\n", ret);
7178                         return ret;
7179                 }
7180         } while (++i < HCLGE_SERDES_RETRY_NUM &&
7181                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
7182
7183         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7184                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7185                 return -EBUSY;
7186         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7187                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7188                 return -EIO;
7189         }
7190         return ret;
7191 }
7192
7193 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7194                                      enum hnae3_loop loop_mode)
7195 {
7196         int ret;
7197
7198         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7199         if (ret)
7200                 return ret;
7201
7202         hclge_cfg_mac_mode(hdev, en);
7203
7204         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7205         if (ret)
7206                 dev_err(&hdev->pdev->dev,
7207                         "serdes loopback config mac mode timeout\n");
7208
7209         return ret;
7210 }
7211
7212 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7213                                      struct phy_device *phydev)
7214 {
7215         int ret;
7216
7217         if (!phydev->suspended) {
7218                 ret = phy_suspend(phydev);
7219                 if (ret)
7220                         return ret;
7221         }
7222
7223         ret = phy_resume(phydev);
7224         if (ret)
7225                 return ret;
7226
7227         return phy_loopback(phydev, true);
7228 }
7229
7230 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7231                                       struct phy_device *phydev)
7232 {
7233         int ret;
7234
7235         ret = phy_loopback(phydev, false);
7236         if (ret)
7237                 return ret;
7238
7239         return phy_suspend(phydev);
7240 }
7241
7242 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7243 {
7244         struct phy_device *phydev = hdev->hw.mac.phydev;
7245         int ret;
7246
7247         if (!phydev)
7248                 return -EOPNOTSUPP;
7249
7250         if (en)
7251                 ret = hclge_enable_phy_loopback(hdev, phydev);
7252         else
7253                 ret = hclge_disable_phy_loopback(hdev, phydev);
7254         if (ret) {
7255                 dev_err(&hdev->pdev->dev,
7256                         "set phy loopback fail, ret = %d\n", ret);
7257                 return ret;
7258         }
7259
7260         hclge_cfg_mac_mode(hdev, en);
7261
7262         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7263         if (ret)
7264                 dev_err(&hdev->pdev->dev,
7265                         "phy loopback config mac mode timeout\n");
7266
7267         return ret;
7268 }
7269
7270 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7271                             int stream_id, bool enable)
7272 {
7273         struct hclge_desc desc;
7274         struct hclge_cfg_com_tqp_queue_cmd *req =
7275                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7276         int ret;
7277
7278         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7279         req->tqp_id = cpu_to_le16(tqp_id);
7280         req->stream_id = cpu_to_le16(stream_id);
7281         if (enable)
7282                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7283
7284         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7285         if (ret)
7286                 dev_err(&hdev->pdev->dev,
7287                         "Tqp enable fail, status = %d.\n", ret);
7288         return ret;
7289 }
7290
7291 static int hclge_set_loopback(struct hnae3_handle *handle,
7292                               enum hnae3_loop loop_mode, bool en)
7293 {
7294         struct hclge_vport *vport = hclge_get_vport(handle);
7295         struct hnae3_knic_private_info *kinfo;
7296         struct hclge_dev *hdev = vport->back;
7297         int i, ret;
7298
7299         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7300          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7301          * the same, the packets are looped back in the SSU. If SSU loopback
7302          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7303          */
7304         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7305                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7306
7307                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7308                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7309                 if (ret)
7310                         return ret;
7311         }
7312
7313         switch (loop_mode) {
7314         case HNAE3_LOOP_APP:
7315                 ret = hclge_set_app_loopback(hdev, en);
7316                 break;
7317         case HNAE3_LOOP_SERIAL_SERDES:
7318         case HNAE3_LOOP_PARALLEL_SERDES:
7319                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7320                 break;
7321         case HNAE3_LOOP_PHY:
7322                 ret = hclge_set_phy_loopback(hdev, en);
7323                 break;
7324         default:
7325                 ret = -EOPNOTSUPP;
7326                 dev_err(&hdev->pdev->dev,
7327                         "loop_mode %d is not supported\n", loop_mode);
7328                 break;
7329         }
7330
7331         if (ret)
7332                 return ret;
7333
7334         kinfo = &vport->nic.kinfo;
7335         for (i = 0; i < kinfo->num_tqps; i++) {
7336                 ret = hclge_tqp_enable(hdev, i, 0, en);
7337                 if (ret)
7338                         return ret;
7339         }
7340
7341         return 0;
7342 }
7343
7344 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7345 {
7346         int ret;
7347
7348         ret = hclge_set_app_loopback(hdev, false);
7349         if (ret)
7350                 return ret;
7351
7352         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7353         if (ret)
7354                 return ret;
7355
7356         return hclge_cfg_serdes_loopback(hdev, false,
7357                                          HNAE3_LOOP_PARALLEL_SERDES);
7358 }
7359
7360 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7361 {
7362         struct hclge_vport *vport = hclge_get_vport(handle);
7363         struct hnae3_knic_private_info *kinfo;
7364         struct hnae3_queue *queue;
7365         struct hclge_tqp *tqp;
7366         int i;
7367
7368         kinfo = &vport->nic.kinfo;
7369         for (i = 0; i < kinfo->num_tqps; i++) {
7370                 queue = handle->kinfo.tqp[i];
7371                 tqp = container_of(queue, struct hclge_tqp, q);
7372                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7373         }
7374 }
7375
7376 static void hclge_flush_link_update(struct hclge_dev *hdev)
7377 {
7378 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7379
7380         unsigned long last = hdev->serv_processed_cnt;
7381         int i = 0;
7382
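        /* Busy-wait (up to HCLGE_FLUSH_LINK_TIMEOUT iterations of ~1 us)
         * while a link update is in flight and no new service-task run has
         * completed since serv_processed_cnt was sampled.
         */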
7383         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7384                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7385                last == hdev->serv_processed_cnt)
7386                 usleep_range(1, 1);
7387 }
7388
7389 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7390 {
7391         struct hclge_vport *vport = hclge_get_vport(handle);
7392         struct hclge_dev *hdev = vport->back;
7393
7394         if (enable) {
7395                 hclge_task_schedule(hdev, 0);
7396         } else {
7397                 /* Set the DOWN flag here to disable link updating */
7398                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7399
7400                 /* flush memory to make sure DOWN is seen by service task */
7401                 smp_mb__before_atomic();
7402                 hclge_flush_link_update(hdev);
7403         }
7404 }
7405
7406 static int hclge_ae_start(struct hnae3_handle *handle)
7407 {
7408         struct hclge_vport *vport = hclge_get_vport(handle);
7409         struct hclge_dev *hdev = vport->back;
7410
7411         /* mac enable */
7412         hclge_cfg_mac_mode(hdev, true);
7413         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7414         hdev->hw.mac.link = 0;
7415
7416         /* reset tqp stats */
7417         hclge_reset_tqp_stats(handle);
7418
7419         hclge_mac_start_phy(hdev);
7420
7421         return 0;
7422 }
7423
7424 static void hclge_ae_stop(struct hnae3_handle *handle)
7425 {
7426         struct hclge_vport *vport = hclge_get_vport(handle);
7427         struct hclge_dev *hdev = vport->back;
7428         int i;
7429
7430         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7431         spin_lock_bh(&hdev->fd_rule_lock);
7432         hclge_clear_arfs_rules(handle);
7433         spin_unlock_bh(&hdev->fd_rule_lock);
7434
7435         /* If it is not a PF (function) reset, the firmware will disable
7436          * the MAC, so only the phy needs to be stopped here.
7437          */
7438         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7439             hdev->reset_type != HNAE3_FUNC_RESET) {
7440                 hclge_mac_stop_phy(hdev);
7441                 hclge_update_link_status(hdev);
7442                 return;
7443         }
7444
7445         for (i = 0; i < handle->kinfo.num_tqps; i++)
7446                 hclge_reset_tqp(handle, i);
7447
7448         hclge_config_mac_tnl_int(hdev, false);
7449
7450         /* Mac disable */
7451         hclge_cfg_mac_mode(hdev, false);
7452
7453         hclge_mac_stop_phy(hdev);
7454
7455         /* reset tqp stats */
7456         hclge_reset_tqp_stats(handle);
7457         hclge_update_link_status(hdev);
7458 }
7459
7460 int hclge_vport_start(struct hclge_vport *vport)
7461 {
7462         struct hclge_dev *hdev = vport->back;
7463
7464         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7465         vport->last_active_jiffies = jiffies;
7466
7467         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7468                 if (vport->vport_id) {
7469                         hclge_restore_mac_table_common(vport);
7470                         hclge_restore_vport_vlan_table(vport);
7471                 } else {
7472                         hclge_restore_hw_table(hdev);
7473                 }
7474         }
7475
7476         clear_bit(vport->vport_id, hdev->vport_config_block);
7477
7478         return 0;
7479 }
7480
7481 void hclge_vport_stop(struct hclge_vport *vport)
7482 {
7483         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7484 }
7485
7486 static int hclge_client_start(struct hnae3_handle *handle)
7487 {
7488         struct hclge_vport *vport = hclge_get_vport(handle);
7489
7490         return hclge_vport_start(vport);
7491 }
7492
7493 static void hclge_client_stop(struct hnae3_handle *handle)
7494 {
7495         struct hclge_vport *vport = hclge_get_vport(handle);
7496
7497         hclge_vport_stop(vport);
7498 }
7499
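/* Summary of the resp_code handling implemented below:
 *   ADD:    0 or 1 -> 0;  UC/MC overflow -> -ENOSPC;  other -> -EIO
 *   REMOVE: 0 -> 0;  1 (entry miss) -> -ENOENT;  other -> -EIO
 *   LKUP:   0 -> 0;  1 (entry miss) -> -ENOENT;  other -> -EIO
 * An unknown op returns -EINVAL, and a non-zero cmdq_resp returns -EIO.
 */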
7500 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7501                                          u16 cmdq_resp, u8  resp_code,
7502                                          enum hclge_mac_vlan_tbl_opcode op)
7503 {
7504         struct hclge_dev *hdev = vport->back;
7505
7506         if (cmdq_resp) {
7507                 dev_err(&hdev->pdev->dev,
7508                         "cmdq execute failed for get_mac_vlan_cmd_status, status = %u.\n",
7509                         cmdq_resp);
7510                 return -EIO;
7511         }
7512
7513         if (op == HCLGE_MAC_VLAN_ADD) {
7514                 if (!resp_code || resp_code == 1)
7515                         return 0;
7516                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7517                          resp_code == HCLGE_ADD_MC_OVERFLOW)
7518                         return -ENOSPC;
7519
7520                 dev_err(&hdev->pdev->dev,
7521                         "add mac addr failed for undefined, code=%u.\n",
7522                         resp_code);
7523                 return -EIO;
7524         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7525                 if (!resp_code) {
7526                         return 0;
7527                 } else if (resp_code == 1) {
7528                         dev_dbg(&hdev->pdev->dev,
7529                                 "remove mac addr failed for miss.\n");
7530                         return -ENOENT;
7531                 }
7532
7533                 dev_err(&hdev->pdev->dev,
7534                         "remove mac addr failed for undefined, code=%u.\n",
7535                         resp_code);
7536                 return -EIO;
7537         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7538                 if (!resp_code) {
7539                         return 0;
7540                 } else if (resp_code == 1) {
7541                         dev_dbg(&hdev->pdev->dev,
7542                                 "lookup mac addr failed for miss.\n");
7543                         return -ENOENT;
7544                 }
7545
7546                 dev_err(&hdev->pdev->dev,
7547                         "lookup mac addr failed for undefined, code=%u.\n",
7548                         resp_code);
7549                 return -EIO;
7550         }
7551
7552         dev_err(&hdev->pdev->dev,
7553                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7554
7555         return -EINVAL;
7556 }
7557
7558 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7559 {
7560 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7561
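        /* The 256 possible function ids are spread over the descriptor data
         * words: vfids 0..191 live in desc[1] (six 32-bit words) and vfids
         * 192..255 in desc[2]. Worked example: vfid 200 maps to desc[2],
         * word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
         */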
7562         unsigned int word_num;
7563         unsigned int bit_num;
7564
7565         if (vfid > 255 || vfid < 0)
7566                 return -EIO;
7567
7568         if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7569                 word_num = vfid / 32;
7570                 bit_num  = vfid % 32;
7571                 if (clr)
7572                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7573                 else
7574                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7575         } else {
7576                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7577                 bit_num  = vfid % 32;
7578                 if (clr)
7579                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7580                 else
7581                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7582         }
7583
7584         return 0;
7585 }
7586
7587 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7588 {
7589 #define HCLGE_DESC_NUMBER 3
7590 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7591         int i, j;
7592
7593         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7594                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7595                         if (desc[i].data[j])
7596                                 return false;
7597
7598         return true;
7599 }
7600
7601 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7602                                    const u8 *addr, bool is_mc)
7603 {
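        /* The 6-byte MAC address is packed little-endian into a 32-bit high
         * word (bytes 0-3) and a 16-bit low word (bytes 4-5). For example, a
         * (hypothetical) address 00:11:22:33:44:55 yields
         * high_val = 0x33221100 and low_val = 0x5544.
         */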
7604         const unsigned char *mac_addr = addr;
7605         u32 high_val = (mac_addr[3] << 24) | (mac_addr[2] << 16) |
7606                        (mac_addr[1] << 8) | mac_addr[0];
7607         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7608
7609         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7610         if (is_mc) {
7611                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7612                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7613         }
7614
7615         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7616         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7617 }
7618
7619 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7620                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7621 {
7622         struct hclge_dev *hdev = vport->back;
7623         struct hclge_desc desc;
7624         u8 resp_code;
7625         u16 retval;
7626         int ret;
7627
7628         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7629
7630         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7631
7632         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7633         if (ret) {
7634                 dev_err(&hdev->pdev->dev,
7635                         "del mac addr failed for cmd_send, ret = %d.\n",
7636                         ret);
7637                 return ret;
7638         }
7639         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7640         retval = le16_to_cpu(desc.retval);
7641
7642         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7643                                              HCLGE_MAC_VLAN_REMOVE);
7644 }
7645
7646 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7647                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7648                                      struct hclge_desc *desc,
7649                                      bool is_mc)
7650 {
7651         struct hclge_dev *hdev = vport->back;
7652         u8 resp_code;
7653         u16 retval;
7654         int ret;
7655
7656         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7657         if (is_mc) {
7658                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7659                 memcpy(desc[0].data,
7660                        req,
7661                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7662                 hclge_cmd_setup_basic_desc(&desc[1],
7663                                            HCLGE_OPC_MAC_VLAN_ADD,
7664                                            true);
7665                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7666                 hclge_cmd_setup_basic_desc(&desc[2],
7667                                            HCLGE_OPC_MAC_VLAN_ADD,
7668                                            true);
7669                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7670         } else {
7671                 memcpy(desc[0].data,
7672                        req,
7673                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7674                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7675         }
7676         if (ret) {
7677                 dev_err(&hdev->pdev->dev,
7678                         "lookup mac addr failed for cmd_send, ret = %d.\n",
7679                         ret);
7680                 return ret;
7681         }
7682         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7683         retval = le16_to_cpu(desc[0].retval);
7684
7685         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7686                                              HCLGE_MAC_VLAN_LKUP);
7687 }
7688
7689 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7690                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7691                                   struct hclge_desc *mc_desc)
7692 {
7693         struct hclge_dev *hdev = vport->back;
7694         int cfg_status;
7695         u8 resp_code;
7696         u16 retval;
7697         int ret;
7698
7699         if (!mc_desc) {
7700                 struct hclge_desc desc;
7701
7702                 hclge_cmd_setup_basic_desc(&desc,
7703                                            HCLGE_OPC_MAC_VLAN_ADD,
7704                                            false);
7705                 memcpy(desc.data, req,
7706                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7707                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7708                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7709                 retval = le16_to_cpu(desc.retval);
7710
7711                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7712                                                            resp_code,
7713                                                            HCLGE_MAC_VLAN_ADD);
7714         } else {
7715                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7716                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7717                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7718                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7719                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7720                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7721                 memcpy(mc_desc[0].data, req,
7722                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7723                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7724                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7725                 retval = le16_to_cpu(mc_desc[0].retval);
7726
7727                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7728                                                            resp_code,
7729                                                            HCLGE_MAC_VLAN_ADD);
7730         }
7731
7732         if (ret) {
7733                 dev_err(&hdev->pdev->dev,
7734                         "add mac addr failed for cmd_send, ret = %d.\n",
7735                         ret);
7736                 return ret;
7737         }
7738
7739         return cfg_status;
7740 }
7741
7742 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7743                                u16 *allocated_size)
7744 {
7745         struct hclge_umv_spc_alc_cmd *req;
7746         struct hclge_desc desc;
7747         int ret;
7748
7749         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7750         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7751
7752         req->space_size = cpu_to_le32(space_size);
7753
7754         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7755         if (ret) {
7756                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7757                         ret);
7758                 return ret;
7759         }
7760
7761         *allocated_size = le32_to_cpu(desc.data[1]);
7762
7763         return 0;
7764 }
7765
7766 static int hclge_init_umv_space(struct hclge_dev *hdev)
7767 {
7768         u16 allocated_size = 0;
7769         int ret;
7770
7771         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7772         if (ret)
7773                 return ret;
7774
7775         if (allocated_size < hdev->wanted_umv_size)
7776                 dev_warn(&hdev->pdev->dev,
7777                          "failed to alloc umv space, want %u, get %u\n",
7778                          hdev->wanted_umv_size, allocated_size);
7779
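        /* Split the allocated space into one private quota per function plus
         * a shared pool: dividing by (num_alloc_vport + 1) reserves one
         * extra quota, which together with the division remainder forms the
         * shared pool. E.g. (hypothetically) 255 entries with 3 vports gives
         * priv_umv_size = 63 and share_umv_size = 63 + 3 = 66.
         */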
7780         hdev->max_umv_size = allocated_size;
7781         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7782         hdev->share_umv_size = hdev->priv_umv_size +
7783                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7784
7785         return 0;
7786 }
7787
7788 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7789 {
7790         struct hclge_vport *vport;
7791         int i;
7792
7793         for (i = 0; i < hdev->num_alloc_vport; i++) {
7794                 vport = &hdev->vport[i];
7795                 vport->used_umv_num = 0;
7796         }
7797
7798         mutex_lock(&hdev->vport_lock);
7799         hdev->share_umv_size = hdev->priv_umv_size +
7800                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7801         mutex_unlock(&hdev->vport_lock);
7802 }
7803
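/* UMV entries are accounted against a vport's private quota first; once
 * used_umv_num reaches priv_umv_size, further entries consume the shared
 * pool. The space is only "full" for a vport when both its private quota
 * and the shared pool are exhausted.
 */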
7804 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7805 {
7806         struct hclge_dev *hdev = vport->back;
7807         bool is_full;
7808
7809         if (need_lock)
7810                 mutex_lock(&hdev->vport_lock);
7811
7812         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7813                    hdev->share_umv_size == 0);
7814
7815         if (need_lock)
7816                 mutex_unlock(&hdev->vport_lock);
7817
7818         return is_full;
7819 }
7820
7821 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7822 {
7823         struct hclge_dev *hdev = vport->back;
7824
7825         if (is_free) {
7826                 if (vport->used_umv_num > hdev->priv_umv_size)
7827                         hdev->share_umv_size++;
7828
7829                 if (vport->used_umv_num > 0)
7830                         vport->used_umv_num--;
7831         } else {
7832                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7833                     hdev->share_umv_size > 0)
7834                         hdev->share_umv_size--;
7835                 vport->used_umv_num++;
7836         }
7837 }
7838
7839 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7840                                                   const u8 *mac_addr)
7841 {
7842         struct hclge_mac_node *mac_node, *tmp;
7843
7844         list_for_each_entry_safe(mac_node, tmp, list, node)
7845                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7846                         return mac_node;
7847
7848         return NULL;
7849 }
7850
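/* Cached mac nodes move through a small state machine:
 *
 *   TO_ADD  -- written to hardware  --> ACTIVE
 *   ACTIVE  -- unsynced by the stack --> TO_DEL
 *   TO_DEL  -- removed from hardware --> node freed
 *
 * hclge_update_mac_node() resolves a new request against the node's
 * current state, e.g. a TO_ADD request on a TO_DEL node simply restores
 * it to ACTIVE.
 */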
7851 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7852                                   enum HCLGE_MAC_NODE_STATE state)
7853 {
7854         switch (state) {
7855         /* from set_rx_mode or tmp_add_list */
7856         case HCLGE_MAC_TO_ADD:
7857                 if (mac_node->state == HCLGE_MAC_TO_DEL)
7858                         mac_node->state = HCLGE_MAC_ACTIVE;
7859                 break;
7860         /* only from set_rx_mode */
7861         case HCLGE_MAC_TO_DEL:
7862                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7863                         list_del(&mac_node->node);
7864                         kfree(mac_node);
7865                 } else {
7866                         mac_node->state = HCLGE_MAC_TO_DEL;
7867                 }
7868                 break;
7869         /* only from tmp_add_list; in this case the mac_node->state
7870          * won't be ACTIVE
7871          */
7872         case HCLGE_MAC_ACTIVE:
7873                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7874                         mac_node->state = HCLGE_MAC_ACTIVE;
7875
7876                 break;
7877         }
7878 }
7879
7880 int hclge_update_mac_list(struct hclge_vport *vport,
7881                           enum HCLGE_MAC_NODE_STATE state,
7882                           enum HCLGE_MAC_ADDR_TYPE mac_type,
7883                           const unsigned char *addr)
7884 {
7885         struct hclge_dev *hdev = vport->back;
7886         struct hclge_mac_node *mac_node;
7887         struct list_head *list;
7888
7889         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7890                 &vport->uc_mac_list : &vport->mc_mac_list;
7891
7892         spin_lock_bh(&vport->mac_list_lock);
7893
7894         /* if the mac addr is already in the mac list, no need to add a new
7895          * one into it, just check the mac addr state, convert it to a
7896          * new state, or just remove it, or do nothing.
7897          */
7898         mac_node = hclge_find_mac_node(list, addr);
7899         if (mac_node) {
7900                 hclge_update_mac_node(mac_node, state);
7901                 spin_unlock_bh(&vport->mac_list_lock);
7902                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7903                 return 0;
7904         }
7905
7906         /* if this address was never added, there is no need to delete it */
7907         if (state == HCLGE_MAC_TO_DEL) {
7908                 spin_unlock_bh(&vport->mac_list_lock);
7909                 dev_err(&hdev->pdev->dev,
7910                         "failed to delete address %pM from mac list\n",
7911                         addr);
7912                 return -ENOENT;
7913         }
7914
7915         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7916         if (!mac_node) {
7917                 spin_unlock_bh(&vport->mac_list_lock);
7918                 return -ENOMEM;
7919         }
7920
7921         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7922
7923         mac_node->state = state;
7924         ether_addr_copy(mac_node->mac_addr, addr);
7925         list_add_tail(&mac_node->node, list);
7926
7927         spin_unlock_bh(&vport->mac_list_lock);
7928
7929         return 0;
7930 }
7931
7932 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7933                              const unsigned char *addr)
7934 {
7935         struct hclge_vport *vport = hclge_get_vport(handle);
7936
7937         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7938                                      addr);
7939 }
7940
7941 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7942                              const unsigned char *addr)
7943 {
7944         struct hclge_dev *hdev = vport->back;
7945         struct hclge_mac_vlan_tbl_entry_cmd req;
7946         struct hclge_desc desc;
7947         u16 egress_port = 0;
7948         int ret;
7949
7950         /* mac addr check */
7951         if (is_zero_ether_addr(addr) ||
7952             is_broadcast_ether_addr(addr) ||
7953             is_multicast_ether_addr(addr)) {
7954                 dev_err(&hdev->pdev->dev,
7955                         "Set_uc mac err! invalid mac:%pM. is_zero:%d, is_br:%d, is_mul:%d\n",
7956                          addr, is_zero_ether_addr(addr),
7957                          is_broadcast_ether_addr(addr),
7958                          is_multicast_ether_addr(addr));
7959                 return -EINVAL;
7960         }
7961
7962         memset(&req, 0, sizeof(req));
7963
7964         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7965                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7966
7967         req.egress_port = cpu_to_le16(egress_port);
7968
7969         hclge_prepare_mac_addr(&req, addr, false);
7970
7971         /* Lookup the mac address in the mac_vlan table, and add
7972          * it if the entry does not exist. Duplicate unicast entries
7973          * are not allowed in the mac vlan table.
7974          */
7975         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7976         if (ret == -ENOENT) {
7977                 mutex_lock(&hdev->vport_lock);
7978                 if (!hclge_is_umv_space_full(vport, false)) {
7979                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7980                         if (!ret)
7981                                 hclge_update_umv_space(vport, false);
7982                         mutex_unlock(&hdev->vport_lock);
7983                         return ret;
7984                 }
7985                 mutex_unlock(&hdev->vport_lock);
7986
7987                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7988                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7989                                 hdev->priv_umv_size);
7990
7991                 return -ENOSPC;
7992         }
7993
7994         /* check if we just hit the duplicate */
7995         if (!ret) {
7996                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7997                          vport->vport_id, addr);
7998                 return 0;
7999         }
8000
8001         dev_err(&hdev->pdev->dev,
8002                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8003                 addr);
8004
8005         return ret;
8006 }
8007
8008 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8009                             const unsigned char *addr)
8010 {
8011         struct hclge_vport *vport = hclge_get_vport(handle);
8012
8013         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8014                                      addr);
8015 }
8016
8017 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8018                             const unsigned char *addr)
8019 {
8020         struct hclge_dev *hdev = vport->back;
8021         struct hclge_mac_vlan_tbl_entry_cmd req;
8022         int ret;
8023
8024         /* mac addr check */
8025         if (is_zero_ether_addr(addr) ||
8026             is_broadcast_ether_addr(addr) ||
8027             is_multicast_ether_addr(addr)) {
8028                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8029                         addr);
8030                 return -EINVAL;
8031         }
8032
8033         memset(&req, 0, sizeof(req));
8034         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8035         hclge_prepare_mac_addr(&req, addr, false);
8036         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8037         if (!ret) {
8038                 mutex_lock(&hdev->vport_lock);
8039                 hclge_update_umv_space(vport, true);
8040                 mutex_unlock(&hdev->vport_lock);
8041         } else if (ret == -ENOENT) {
8042                 ret = 0;
8043         }
8044
8045         return ret;
8046 }
8047
8048 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8049                              const unsigned char *addr)
8050 {
8051         struct hclge_vport *vport = hclge_get_vport(handle);
8052
8053         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8054                                      addr);
8055 }
8056
8057 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8058                              const unsigned char *addr)
8059 {
8060         struct hclge_dev *hdev = vport->back;
8061         struct hclge_mac_vlan_tbl_entry_cmd req;
8062         struct hclge_desc desc[3];
8063         int status;
8064
8065         /* mac addr check */
8066         if (!is_multicast_ether_addr(addr)) {
8067                 dev_err(&hdev->pdev->dev,
8068                         "Add mc mac err! invalid mac:%pM.\n",
8069                          addr);
8070                 return -EINVAL;
8071         }
8072         memset(&req, 0, sizeof(req));
8073         hclge_prepare_mac_addr(&req, addr, true);
8074         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8075         if (status) {
8076                 /* This mac addr does not exist, add a new entry for it */
8077                 memset(desc[0].data, 0, sizeof(desc[0].data));
8078                 memset(desc[1].data, 0, sizeof(desc[1].data));
8079                 memset(desc[2].data, 0, sizeof(desc[2].data));
8080         }
8081         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8082         if (status)
8083                 return status;
8084         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8085
8086         /* if the table has already overflowed, do not print each time */
8087         if (status == -ENOSPC &&
8088             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8089                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8090
8091         return status;
8092 }
8093
8094 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8095                             const unsigned char *addr)
8096 {
8097         struct hclge_vport *vport = hclge_get_vport(handle);
8098
8099         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8100                                      addr);
8101 }
8102
8103 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8104                             const unsigned char *addr)
8105 {
8106         struct hclge_dev *hdev = vport->back;
8107         struct hclge_mac_vlan_tbl_entry_cmd req;
8108         struct hclge_desc desc[3];
8109         int status;
8110
8111         /* mac addr check */
8112         if (!is_multicast_ether_addr(addr)) {
8113                 dev_dbg(&hdev->pdev->dev,
8114                         "Remove mc mac err! invalid mac:%pM.\n",
8115                          addr);
8116                 return -EINVAL;
8117         }
8118
8119         memset(&req, 0, sizeof(req));
8120         hclge_prepare_mac_addr(&req, addr, true);
8121         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8122         if (!status) {
8123                 /* This mac addr exists, remove this handle's VFID from it */
8124                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8125                 if (status)
8126                         return status;
8127
8128                 if (hclge_is_all_function_id_zero(desc))
8129                         /* All the vfids are zero, so delete this entry */
8130                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8131                 else
8132                         /* Not all the vfids are zero, so update the entry */
8133                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8134
8135         } else if (status == -ENOENT) {
8136                 status = 0;
8137         }
8138
8139         return status;
8140 }
8141
8142 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8143                                       struct list_head *list,
8144                                       int (*sync)(struct hclge_vport *vport,
8145                                                   const unsigned char *addr))
8146 {
8147         struct hclge_mac_node *mac_node, *tmp;
8148         int ret;
8149
8150         list_for_each_entry_safe(mac_node, tmp, list, node) {
8151                 ret = sync(vport, mac_node->mac_addr);
8152                 if (!ret) {
8153                         mac_node->state = HCLGE_MAC_ACTIVE;
8154                 } else {
8155                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8156                                 &vport->state);
8157                         break;
8158                 }
8159         }
8160 }
8161
8162 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8163                                         struct list_head *list,
8164                                         int (*unsync)(struct hclge_vport *vport,
8165                                                       const unsigned char *addr))
8166 {
8167         struct hclge_mac_node *mac_node, *tmp;
8168         int ret;
8169
8170         list_for_each_entry_safe(mac_node, tmp, list, node) {
8171                 ret = unsync(vport, mac_node->mac_addr);
8172                 if (!ret || ret == -ENOENT) {
8173                         list_del(&mac_node->node);
8174                         kfree(mac_node);
8175                 } else {
8176                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8177                                 &vport->state);
8178                         break;
8179                 }
8180         }
8181 }
8182
8183 static bool hclge_sync_from_add_list(struct list_head *add_list,
8184                                      struct list_head *mac_list)
8185 {
8186         struct hclge_mac_node *mac_node, *tmp, *new_node;
8187         bool all_added = true;
8188
8189         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8190                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8191                         all_added = false;
8192
8193                 /* if the mac address from tmp_add_list is not in the
8194                  * uc/mc_mac_list, a TO_DEL request was received during the
8195                  * time window of adding the mac address into the mac table.
8196                  * if the mac_node state is ACTIVE, change it to TO_DEL so it
8197                  * will be removed next time; otherwise it must be TO_ADD, and
8198                  * since the address hasn't been added into the mac table yet,
8199                  * just remove the mac node.
8200                  */
8201                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8202                 if (new_node) {
8203                         hclge_update_mac_node(new_node, mac_node->state);
8204                         list_del(&mac_node->node);
8205                         kfree(mac_node);
8206                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8207                         mac_node->state = HCLGE_MAC_TO_DEL;
8208                         list_del(&mac_node->node);
8209                         list_add_tail(&mac_node->node, mac_list);
8210                 } else {
8211                         list_del(&mac_node->node);
8212                         kfree(mac_node);
8213                 }
8214         }
8215
8216         return all_added;
8217 }
8218
8219 static void hclge_sync_from_del_list(struct list_head *del_list,
8220                                      struct list_head *mac_list)
8221 {
8222         struct hclge_mac_node *mac_node, *tmp, *new_node;
8223
8224         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8225                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8226                 if (new_node) {
8227                         /* If the mac addr exists in the mac list, a new TO_ADD
8228                          * request was received during the time window of
8229                          * configuring the mac address. The mac node state is
8230                          * TO_ADD, and the address is already in the hardware
8231                          * (because the delete failed), so just change the mac
8232                          * node state to ACTIVE.
8233                          */
8234                         new_node->state = HCLGE_MAC_ACTIVE;
8235                         list_del(&mac_node->node);
8236                         kfree(mac_node);
8237                 } else {
8238                         list_del(&mac_node->node);
8239                         list_add_tail(&mac_node->node, mac_list);
8240                 }
8241         }
8242 }
8243
8244 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8245                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8246                                         bool is_all_added)
8247 {
8248         if (mac_type == HCLGE_MAC_ADDR_UC) {
8249                 if (is_all_added)
8250                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8251                 else
8252                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8253         } else {
8254                 if (is_all_added)
8255                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8256                 else
8257                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8258         }
8259 }
8260
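/* Sync one vport's cached mac list to hardware in three phases:
 * 1) under mac_list_lock, snapshot TO_DEL nodes into tmp_del_list and
 *    copy TO_ADD nodes into tmp_add_list;
 * 2) outside the lock, apply the deletes and then the adds to the
 *    mac_vlan table (deletes first, to free table space);
 * 3) under the lock again, merge the results back into the main list so
 *    failed entries are retried on the next service-task run.
 */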
8261 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8262                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8263 {
8264         struct hclge_mac_node *mac_node, *tmp, *new_node;
8265         struct list_head tmp_add_list, tmp_del_list;
8266         struct list_head *list;
8267         bool all_added;
8268
8269         INIT_LIST_HEAD(&tmp_add_list);
8270         INIT_LIST_HEAD(&tmp_del_list);
8271
8272         /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8273          * they can be added/deleted outside the spin lock
8274          */
8275         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8276                 &vport->uc_mac_list : &vport->mc_mac_list;
8277
8278         spin_lock_bh(&vport->mac_list_lock);
8279
8280         list_for_each_entry_safe(mac_node, tmp, list, node) {
8281                 switch (mac_node->state) {
8282                 case HCLGE_MAC_TO_DEL:
8283                         list_del(&mac_node->node);
8284                         list_add_tail(&mac_node->node, &tmp_del_list);
8285                         break;
8286                 case HCLGE_MAC_TO_ADD:
8287                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8288                         if (!new_node)
8289                                 goto stop_traverse;
8290                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8291                         new_node->state = mac_node->state;
8292                         list_add_tail(&new_node->node, &tmp_add_list);
8293                         break;
8294                 default:
8295                         break;
8296                 }
8297         }
8298
8299 stop_traverse:
8300         spin_unlock_bh(&vport->mac_list_lock);
8301
8302         /* delete first, in order to get max mac table space for adding */
8303         if (mac_type == HCLGE_MAC_ADDR_UC) {
8304                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8305                                             hclge_rm_uc_addr_common);
8306                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8307                                           hclge_add_uc_addr_common);
8308         } else {
8309                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8310                                             hclge_rm_mc_addr_common);
8311                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8312                                           hclge_add_mc_addr_common);
8313         }
8314
8315         /* if adding/deleting some mac addresses failed, move them back to
8316          * the mac_list and retry next time.
8317          */
8318         spin_lock_bh(&vport->mac_list_lock);
8319
8320         hclge_sync_from_del_list(&tmp_del_list, list);
8321         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8322
8323         spin_unlock_bh(&vport->mac_list_lock);
8324
8325         hclge_update_overflow_flags(vport, mac_type, all_added);
8326 }
8327
8328 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8329 {
8330         struct hclge_dev *hdev = vport->back;
8331
8332         if (test_bit(vport->vport_id, hdev->vport_config_block))
8333                 return false;
8334
8335         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8336                 return true;
8337
8338         return false;
8339 }
8340
8341 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8342 {
8343         int i;
8344
8345         for (i = 0; i < hdev->num_alloc_vport; i++) {
8346                 struct hclge_vport *vport = &hdev->vport[i];
8347
8348                 if (!hclge_need_sync_mac_table(vport))
8349                         continue;
8350
8351                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8352                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8353         }
8354 }
8355
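/* Move all TO_DEL and ACTIVE nodes onto tmp_del_list; when is_del_list
 * is true, TO_ADD nodes are dropped as well, since they were never
 * written to hardware. Must be called with mac_list_lock held.
 */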
8356 static void hclge_build_del_list(struct list_head *list,
8357                                  bool is_del_list,
8358                                  struct list_head *tmp_del_list)
8359 {
8360         struct hclge_mac_node *mac_cfg, *tmp;
8361
8362         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8363                 switch (mac_cfg->state) {
8364                 case HCLGE_MAC_TO_DEL:
8365                 case HCLGE_MAC_ACTIVE:
8366                         list_del(&mac_cfg->node);
8367                         list_add_tail(&mac_cfg->node, tmp_del_list);
8368                         break;
8369                 case HCLGE_MAC_TO_ADD:
8370                         if (is_del_list) {
8371                                 list_del(&mac_cfg->node);
8372                                 kfree(mac_cfg);
8373                         }
8374                         break;
8375                 }
8376         }
8377 }
8378
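/* Remove each address on tmp_del_list from hardware via @unsync. On
 * success (or -ENOENT), an ACTIVE node is kept as TO_ADD when
 * !is_del_list, so it can be restored after the VF reset; otherwise
 * the node is freed. On other failures with is_del_list, the node is
 * left as TO_DEL for a later retry.
 */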
8379 static void hclge_unsync_del_list(struct hclge_vport *vport,
8380                                   int (*unsync)(struct hclge_vport *vport,
8381                                                 const unsigned char *addr),
8382                                   bool is_del_list,
8383                                   struct list_head *tmp_del_list)
8384 {
8385         struct hclge_mac_node *mac_cfg, *tmp;
8386         int ret;
8387
8388         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8389                 ret = unsync(vport, mac_cfg->mac_addr);
8390                 if (!ret || ret == -ENOENT) {
8391                         /* clear the mac addr from hardware, but keep it in
8392                          * the mac list, so it can be restored after the vf
8393                          * reset finishes.
8394                          */
8395                         if (!is_del_list &&
8396                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8397                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8398                         } else {
8399                                 list_del(&mac_cfg->node);
8400                                 kfree(mac_cfg);
8401                         }
8402                 } else if (is_del_list) {
8403                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8404                 }
8405         }
8406 }
8407
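/* Remove the vport's unicast or multicast addresses from hardware.
 * When is_del_list is false (VF reset path), the addresses stay on the
 * mac list in TO_ADD state, and the vport's config-block bit is set so
 * the sync task leaves the table alone until the reset completes.
 */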
8408 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8409                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8410 {
8411         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8412         struct hclge_dev *hdev = vport->back;
8413         struct list_head tmp_del_list, *list;
8414
8415         if (mac_type == HCLGE_MAC_ADDR_UC) {
8416                 list = &vport->uc_mac_list;
8417                 unsync = hclge_rm_uc_addr_common;
8418         } else {
8419                 list = &vport->mc_mac_list;
8420                 unsync = hclge_rm_mc_addr_common;
8421         }
8422
8423         INIT_LIST_HEAD(&tmp_del_list);
8424
8425         if (!is_del_list)
8426                 set_bit(vport->vport_id, hdev->vport_config_block);
8427
8428         spin_lock_bh(&vport->mac_list_lock);
8429
8430         hclge_build_del_list(list, is_del_list, &tmp_del_list);
8431
8432         spin_unlock_bh(&vport->mac_list_lock);
8433
8434         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8435
8436         spin_lock_bh(&vport->mac_list_lock);
8437
8438         hclge_sync_from_del_list(&tmp_del_list, list);
8439
8440         spin_unlock_bh(&vport->mac_list_lock);
8441 }
8442
8443 /* remove all mac addresses when uninitializing */
8444 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8445                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8446 {
8447         struct hclge_mac_node *mac_node, *tmp;
8448         struct hclge_dev *hdev = vport->back;
8449         struct list_head tmp_del_list, *list;
8450
8451         INIT_LIST_HEAD(&tmp_del_list);
8452
8453         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8454                 &vport->uc_mac_list : &vport->mc_mac_list;
8455
8456         spin_lock_bh(&vport->mac_list_lock);
8457
8458         list_for_each_entry_safe(mac_node, tmp, list, node) {
8459                 switch (mac_node->state) {
8460                 case HCLGE_MAC_TO_DEL:
8461                 case HCLGE_MAC_ACTIVE:
8462                         list_del(&mac_node->node);
8463                         list_add_tail(&mac_node->node, &tmp_del_list);
8464                         break;
8465                 case HCLGE_MAC_TO_ADD:
8466                         list_del(&mac_node->node);
8467                         kfree(mac_node);
8468                         break;
8469                 }
8470         }
8471
8472         spin_unlock_bh(&vport->mac_list_lock);
8473
8474         if (mac_type == HCLGE_MAC_ADDR_UC)
8475                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8476                                             hclge_rm_uc_addr_common);
8477         else
8478                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8479                                             hclge_rm_mc_addr_common);
8480
8481         if (!list_empty(&tmp_del_list))
8482                 dev_warn(&hdev->pdev->dev,
8483                          "failed to completely uninit %s mac list for vport %u\n",
8484                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8485                          vport->vport_id);
8486
8487         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8488                 list_del(&mac_node->node);
8489                 kfree(mac_node);
8490         }
8491 }
8492
8493 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8494 {
8495         struct hclge_vport *vport;
8496         int i;
8497
8498         for (i = 0; i < hdev->num_alloc_vport; i++) {
8499                 vport = &hdev->vport[i];
8500                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8501                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8502         }
8503 }
8504
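/* Map the mac ethertype command response code to an errno: success and
 * already-added both count as 0, everything else is logged as -EIO.
 */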
8505 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8506                                               u16 cmdq_resp, u8 resp_code)
8507 {
8508 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
8509 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
8510 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
8511 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
8512
8513         int return_status;
8514
8515         if (cmdq_resp) {
8516                 dev_err(&hdev->pdev->dev,
8517                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8518                         cmdq_resp);
8519                 return -EIO;
8520         }
8521
8522         switch (resp_code) {
8523         case HCLGE_ETHERTYPE_SUCCESS_ADD:
8524         case HCLGE_ETHERTYPE_ALREADY_ADD:
8525                 return_status = 0;
8526                 break;
8527         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8528                 dev_err(&hdev->pdev->dev,
8529                         "add mac ethertype failed for manager table overflow.\n");
8530                 return_status = -EIO;
8531                 break;
8532         case HCLGE_ETHERTYPE_KEY_CONFLICT:
8533                 dev_err(&hdev->pdev->dev,
8534                         "add mac ethertype failed for key conflict.\n");
8535                 return_status = -EIO;
8536                 break;
8537         default:
8538                 dev_err(&hdev->pdev->dev,
8539                         "add mac ethertype failed for undefined, code=%u.\n",
8540                         resp_code);
8541                 return_status = -EIO;
8542         }
8543
8544         return return_status;
8545 }
8546
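/* Returns true if @mac_addr is already in use, either in the hardware
 * mac_vlan table or as the configured mac of another VF.
 */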
8547 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8548                                      u8 *mac_addr)
8549 {
8550         struct hclge_mac_vlan_tbl_entry_cmd req;
8551         struct hclge_dev *hdev = vport->back;
8552         struct hclge_desc desc;
8553         u16 egress_port = 0;
8554         int i;
8555
8556         if (is_zero_ether_addr(mac_addr))
8557                 return false;
8558
8559         memset(&req, 0, sizeof(req));
8560         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8561                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8562         req.egress_port = cpu_to_le16(egress_port);
8563         hclge_prepare_mac_addr(&req, mac_addr, false);
8564
8565         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8566                 return true;
8567
8568         vf_idx += HCLGE_VF_VPORT_START_NUM;
8569         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8570                 if (i != vf_idx &&
8571                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8572                         return true;
8573
8574         return false;
8575 }
8576
8577 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8578                             u8 *mac_addr)
8579 {
8580         struct hclge_vport *vport = hclge_get_vport(handle);
8581         struct hclge_dev *hdev = vport->back;
8582
8583         vport = hclge_get_vf_vport(hdev, vf);
8584         if (!vport)
8585                 return -EINVAL;
8586
8587         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8588                 dev_info(&hdev->pdev->dev,
8589                          "Specified MAC(=%pM) is the same as before, no change committed!\n",
8590                          mac_addr);
8591                 return 0;
8592         }
8593
8594         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8595                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8596                         mac_addr);
8597                 return -EEXIST;
8598         }
8599
8600         ether_addr_copy(vport->vf_info.mac, mac_addr);
8601
8602         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8603                 dev_info(&hdev->pdev->dev,
8604                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8605                          vf, mac_addr);
8606                 return hclge_inform_reset_assert_to_vf(vport);
8607         }
8608
8609         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8610                  vf, mac_addr);
8611         return 0;
8612 }
8613
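/* Add one mac manager table entry (the LLDP entry, for instance) and
 * translate the firmware response code via
 * hclge_get_mac_ethertype_cmd_status().
 */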
8614 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8615                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8616 {
8617         struct hclge_desc desc;
8618         u8 resp_code;
8619         u16 retval;
8620         int ret;
8621
8622         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8623         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8624
8625         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8626         if (ret) {
8627                 dev_err(&hdev->pdev->dev,
8628                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8629                         ret);
8630                 return ret;
8631         }
8632
8633         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8634         retval = le16_to_cpu(desc.retval);
8635
8636         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8637 }
8638
8639 static int init_mgr_tbl(struct hclge_dev *hdev)
8640 {
8641         int ret;
8642         int i;
8643
8644         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8645                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8646                 if (ret) {
8647                         dev_err(&hdev->pdev->dev,
8648                                 "add mac ethertype failed, ret =%d.\n",
8649                                 ret);
8650                         return ret;
8651                 }
8652         }
8653
8654         return 0;
8655 }
8656
8657 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8658 {
8659         struct hclge_vport *vport = hclge_get_vport(handle);
8660         struct hclge_dev *hdev = vport->back;
8661
8662         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8663 }
8664
8665 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8666                                        const u8 *old_addr, const u8 *new_addr)
8667 {
8668         struct list_head *list = &vport->uc_mac_list;
8669         struct hclge_mac_node *old_node, *new_node;
8670
8671         new_node = hclge_find_mac_node(list, new_addr);
8672         if (!new_node) {
8673                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8674                 if (!new_node)
8675                         return -ENOMEM;
8676
8677                 new_node->state = HCLGE_MAC_TO_ADD;
8678                 ether_addr_copy(new_node->mac_addr, new_addr);
8679                 list_add(&new_node->node, list);
8680         } else {
8681                 if (new_node->state == HCLGE_MAC_TO_DEL)
8682                         new_node->state = HCLGE_MAC_ACTIVE;
8683
8684                 /* make sure the new addr is at the list head, so the dev
8685                  * addr is re-added into the mac table first and is not lost
8686                  * to the umv space limitation after a global/imp reset,
8687                  * which clears the mac table in hardware.
8688                  */
8689                 list_move(&new_node->node, list);
8690         }
8691
8692         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8693                 old_node = hclge_find_mac_node(list, old_addr);
8694                 if (old_node) {
8695                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8696                                 list_del(&old_node->node);
8697                                 kfree(old_node);
8698                         } else {
8699                                 old_node->state = HCLGE_MAC_TO_DEL;
8700                         }
8701                 }
8702         }
8703
8704         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8705
8706         return 0;
8707 }
8708
8709 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8710                               bool is_first)
8711 {
8712         const unsigned char *new_addr = (const unsigned char *)p;
8713         struct hclge_vport *vport = hclge_get_vport(handle);
8714         struct hclge_dev *hdev = vport->back;
8715         unsigned char *old_addr = NULL;
8716         int ret;
8717
8718         /* mac addr check */
8719         if (is_zero_ether_addr(new_addr) ||
8720             is_broadcast_ether_addr(new_addr) ||
8721             is_multicast_ether_addr(new_addr)) {
8722                 dev_err(&hdev->pdev->dev,
8723                         "change uc mac err! invalid mac: %pM.\n",
8724                          new_addr);
8725                 return -EINVAL;
8726         }
8727
8728         ret = hclge_pause_addr_cfg(hdev, new_addr);
8729         if (ret) {
8730                 dev_err(&hdev->pdev->dev,
8731                         "failed to configure mac pause address, ret = %d\n",
8732                         ret);
8733                 return ret;
8734         }
8735
8736         if (!is_first)
8737                 old_addr = hdev->hw.mac.mac_addr;
8738
8739         spin_lock_bh(&vport->mac_list_lock);
8740         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8741         if (ret) {
8742                 dev_err(&hdev->pdev->dev,
8743                         "failed to change the mac addr:%pM, ret = %d\n",
8744                         new_addr, ret);
8745                 spin_unlock_bh(&vport->mac_list_lock);
8746
8747                 if (!is_first)
8748                         hclge_pause_addr_cfg(hdev, old_addr);
8749
8750                 return ret;
8751         }
8752         /* we must update the dev addr under spin lock protection, to prevent
8753          * it from being removed by the set_rx_mode path.
8754          */
8755         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8756         spin_unlock_bh(&vport->mac_list_lock);
8757
8758         hclge_task_schedule(hdev, 0);
8759
8760         return 0;
8761 }
8762
8763 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8764                           int cmd)
8765 {
8766         struct hclge_vport *vport = hclge_get_vport(handle);
8767         struct hclge_dev *hdev = vport->back;
8768
8769         if (!hdev->hw.mac.phydev)
8770                 return -EOPNOTSUPP;
8771
8772         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8773 }
8774
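/* Read-modify-write of the vlan filter control config: read the
 * current setting for @vlan_type/@vf_id, then set or clear the
 * @fe_type filter-enable bits according to @filter_en.
 */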
8775 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8776                                       u8 fe_type, bool filter_en, u8 vf_id)
8777 {
8778         struct hclge_vlan_filter_ctrl_cmd *req;
8779         struct hclge_desc desc;
8780         int ret;
8781
8782         /* read current vlan filter parameter */
8783         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8784         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8785         req->vlan_type = vlan_type;
8786         req->vf_id = vf_id;
8787
8788         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8789         if (ret) {
8790                 dev_err(&hdev->pdev->dev,
8791                         "failed to get vlan filter config, ret = %d.\n", ret);
8792                 return ret;
8793         }
8794
8795         /* modify and write new config parameter */
8796         hclge_cmd_reuse_desc(&desc, false);
8797         req->vlan_fe = filter_en ?
8798                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8799
8800         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8801         if (ret)
8802                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8803                         ret);
8804
8805         return ret;
8806 }
8807
8808 #define HCLGE_FILTER_TYPE_VF            0
8809 #define HCLGE_FILTER_TYPE_PORT          1
8810 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8811 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8812 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8813 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8814 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8815 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8816                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8817 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8818                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8819
8820 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8821 {
8822         struct hclge_vport *vport = hclge_get_vport(handle);
8823         struct hclge_dev *hdev = vport->back;
8824
8825         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8826                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8827                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
8828                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8829                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
8830         } else {
8831                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8832                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8833                                            0);
8834         }
8835         if (enable)
8836                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8837         else
8838                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8839 }
8840
8841 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
8842                                         bool is_kill, u16 vlan,
8843                                         struct hclge_desc *desc)
8844 {
8845         struct hclge_vlan_filter_vf_cfg_cmd *req0;
8846         struct hclge_vlan_filter_vf_cfg_cmd *req1;
8847         u8 vf_byte_val;
8848         u8 vf_byte_off;
8849         int ret;
8850
8851         hclge_cmd_setup_basic_desc(&desc[0],
8852                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8853         hclge_cmd_setup_basic_desc(&desc[1],
8854                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8855
8856         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8857
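        /* locate the vf's bit in the vf bitmap, which spans two
         * descriptors of HCLGE_MAX_VF_BYTES each; e.g. vfid 10 maps to
         * byte 1, bit 2 (value 0x04)
         */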
8858         vf_byte_off = vfid / 8;
8859         vf_byte_val = 1 << (vfid % 8);
8860
8861         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8862         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8863
8864         req0->vlan_id  = cpu_to_le16(vlan);
8865         req0->vlan_cfg = is_kill;
8866
8867         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8868                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8869         else
8870                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8871
8872         ret = hclge_cmd_send(&hdev->hw, desc, 2);
8873         if (ret) {
8874                 dev_err(&hdev->pdev->dev,
8875                         "Send vf vlan command fail, ret =%d.\n",
8876                         ret);
8877                 return ret;
8878         }
8879
8880         return 0;
8881 }
8882
8883 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
8884                                           bool is_kill, struct hclge_desc *desc)
8885 {
8886         struct hclge_vlan_filter_vf_cfg_cmd *req;
8887
8888         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8889
8890         if (!is_kill) {
8891 #define HCLGE_VF_VLAN_NO_ENTRY  2
8892                 if (!req->resp_code || req->resp_code == 1)
8893                         return 0;
8894
8895                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8896                         set_bit(vfid, hdev->vf_vlan_full);
8897                         dev_warn(&hdev->pdev->dev,
8898                                  "vf vlan table is full, vf vlan filter is disabled\n");
8899                         return 0;
8900                 }
8901
8902                 dev_err(&hdev->pdev->dev,
8903                         "Add vf vlan filter fail, ret =%u.\n",
8904                         req->resp_code);
8905         } else {
8906 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
8907                 if (!req->resp_code)
8908                         return 0;
8909
8910                 /* vf vlan filter is disabled when the vf vlan table is full,
8911                  * so new vlan ids are never added into the vf vlan table.
8912                  * Just return 0 without warning, to avoid massive verbose
8913                  * logs on unload.
8914                  */
8915                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8916                         return 0;
8917
8918                 dev_err(&hdev->pdev->dev,
8919                         "Kill vf vlan filter fail, ret =%u.\n",
8920                         req->resp_code);
8921         }
8922
8923         return -EIO;
8924 }
8925
8926 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8927                                     bool is_kill, u16 vlan,
8928                                     __be16 proto)
8929 {
8930         struct hclge_vport *vport = &hdev->vport[vfid];
8931         struct hclge_desc desc[2];
8932         int ret;
8933
8934         /* if the vf vlan table is full, firmware closes the vf vlan filter,
8935          * so adding a new vlan id is neither possible nor necessary.
8936          * If spoof check is enabled and the vf vlan table is full, reject
8937          * new vlans, since tx packets with those vlan ids would be dropped.
8938          */
8939         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8940                 if (vport->vf_info.spoofchk && vlan) {
8941                         dev_err(&hdev->pdev->dev,
8942                                 "Can't add vlan: spoof check is on and the vf vlan table is full\n");
8943                         return -EPERM;
8944                 }
8945                 return 0;
8946         }
8947
8948         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
8949         if (ret)
8950                 return ret;
8951
8952         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
8953 }
8954
8955 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8956                                       u16 vlan_id, bool is_kill)
8957 {
8958         struct hclge_vlan_filter_pf_cfg_cmd *req;
8959         struct hclge_desc desc;
8960         u8 vlan_offset_byte_val;
8961         u8 vlan_offset_byte;
8962         u8 vlan_offset_160;
8963         int ret;
8964
8965         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8966
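        /* locate the vlan in the pf vlan bitmap: each command covers a
         * window of HCLGE_VLAN_ID_OFFSET_STEP vlan ids. Assuming a step
         * of 160 and a byte size of 8, vlan_id 100 falls in window 0,
         * byte 12, bit 4 (value 0x10).
         */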
8967         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8968         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8969                            HCLGE_VLAN_BYTE_SIZE;
8970         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8971
8972         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8973         req->vlan_offset = vlan_offset_160;
8974         req->vlan_cfg = is_kill;
8975         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8976
8977         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8978         if (ret)
8979                 dev_err(&hdev->pdev->dev,
8980                         "port vlan command, send fail, ret =%d.\n", ret);
8981         return ret;
8982 }
8983
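/* Program the per-vf vlan filter, then refcount the vlan across vports
 * via hdev->vlan_table: the port-level filter is only touched when the
 * first vport joins (add) or the last vport leaves (kill) the vlan.
 */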
8984 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8985                                     u16 vport_id, u16 vlan_id,
8986                                     bool is_kill)
8987 {
8988         u16 vport_idx, vport_num = 0;
8989         int ret;
8990
8991         if (is_kill && !vlan_id)
8992                 return 0;
8993
8994         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8995                                        proto);
8996         if (ret) {
8997                 dev_err(&hdev->pdev->dev,
8998                         "Set %u vport vlan filter config fail, ret =%d.\n",
8999                         vport_id, ret);
9000                 return ret;
9001         }
9002
9003         /* vlan 0 may be added twice when 8021q module is enabled */
9004         if (!is_kill && !vlan_id &&
9005             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9006                 return 0;
9007
9008         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9009                 dev_err(&hdev->pdev->dev,
9010                         "Add port vlan failed, vport %u is already in vlan %u\n",
9011                         vport_id, vlan_id);
9012                 return -EINVAL;
9013         }
9014
9015         if (is_kill &&
9016             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9017                 dev_err(&hdev->pdev->dev,
9018                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9019                         vport_id, vlan_id);
9020                 return -EINVAL;
9021         }
9022
9023         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9024                 vport_num++;
9025
9026         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9027                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9028                                                  is_kill);
9029
9030         return ret;
9031 }
9032
9033 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9034 {
9035         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9036         struct hclge_vport_vtag_tx_cfg_cmd *req;
9037         struct hclge_dev *hdev = vport->back;
9038         struct hclge_desc desc;
9039         u16 bmap_index;
9040         int status;
9041
9042         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9043
9044         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9045         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9046         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9047         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9048                       vcfg->accept_tag1 ? 1 : 0);
9049         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9050                       vcfg->accept_untag1 ? 1 : 0);
9051         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9052                       vcfg->accept_tag2 ? 1 : 0);
9053         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9054                       vcfg->accept_untag2 ? 1 : 0);
9055         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9056                       vcfg->insert_tag1_en ? 1 : 0);
9057         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9058                       vcfg->insert_tag2_en ? 1 : 0);
9059         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9060                       vcfg->tag_shift_mode_en ? 1 : 0);
9061         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9062
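        /* select the vf's bit in the per-command vf bitmap; assuming
         * HCLGE_VF_NUM_PER_CMD is 256 and HCLGE_VF_NUM_PER_BYTE is 8,
         * vport 10 maps to vf_offset 0, byte 1, bit 2
         */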
9063         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9064         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9065                         HCLGE_VF_NUM_PER_BYTE;
9066         req->vf_bitmap[bmap_index] =
9067                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9068
9069         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9070         if (status)
9071                 dev_err(&hdev->pdev->dev,
9072                         "Send port txvlan cfg command fail, ret =%d\n",
9073                         status);
9074
9075         return status;
9076 }
9077
9078 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9079 {
9080         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9081         struct hclge_vport_vtag_rx_cfg_cmd *req;
9082         struct hclge_dev *hdev = vport->back;
9083         struct hclge_desc desc;
9084         u16 bmap_index;
9085         int status;
9086
9087         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9088
9089         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9090         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9091                       vcfg->strip_tag1_en ? 1 : 0);
9092         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9093                       vcfg->strip_tag2_en ? 1 : 0);
9094         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9095                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9096         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9097                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9098         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9099                       vcfg->strip_tag1_discard_en ? 1 : 0);
9100         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9101                       vcfg->strip_tag2_discard_en ? 1 : 0);
9102
9103         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9104         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9105                         HCLGE_VF_NUM_PER_BYTE;
9106         req->vf_bitmap[bmap_index] =
9107                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9108
9109         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9110         if (status)
9111                 dev_err(&hdev->pdev->dev,
9112                         "Send port rxvlan cfg command fail, ret =%d\n",
9113                         status);
9114
9115         return status;
9116 }
9117
9118 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9119                                   u16 port_base_vlan_state,
9120                                   u16 vlan_tag)
9121 {
9122         int ret;
9123
9124         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9125                 vport->txvlan_cfg.accept_tag1 = true;
9126                 vport->txvlan_cfg.insert_tag1_en = false;
9127                 vport->txvlan_cfg.default_tag1 = 0;
9128         } else {
9129                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9130
9131                 vport->txvlan_cfg.accept_tag1 =
9132                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9133                 vport->txvlan_cfg.insert_tag1_en = true;
9134                 vport->txvlan_cfg.default_tag1 = vlan_tag;
9135         }
9136
9137         vport->txvlan_cfg.accept_untag1 = true;
9138
9139         /* accept_tag2 and accept_untag2 are not supported on
9140          * pdev revision 0x20; newer revisions support them, but
9141          * these two fields cannot be configured by the user.
9142          */
9143         vport->txvlan_cfg.accept_tag2 = true;
9144         vport->txvlan_cfg.accept_untag2 = true;
9145         vport->txvlan_cfg.insert_tag2_en = false;
9146         vport->txvlan_cfg.default_tag2 = 0;
9147         vport->txvlan_cfg.tag_shift_mode_en = true;
9148
9149         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9150                 vport->rxvlan_cfg.strip_tag1_en = false;
9151                 vport->rxvlan_cfg.strip_tag2_en =
9152                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9153                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9154         } else {
9155                 vport->rxvlan_cfg.strip_tag1_en =
9156                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9157                 vport->rxvlan_cfg.strip_tag2_en = true;
9158                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9159         }
9160
9161         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9162         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9163         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9164
9165         ret = hclge_set_vlan_tx_offload_cfg(vport);
9166         if (ret)
9167                 return ret;
9168
9169         return hclge_set_vlan_rx_offload_cfg(vport);
9170 }
9171
9172 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9173 {
9174         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9175         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9176         struct hclge_desc desc;
9177         int status;
9178
9179         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9180         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9181         rx_req->ot_fst_vlan_type =
9182                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9183         rx_req->ot_sec_vlan_type =
9184                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9185         rx_req->in_fst_vlan_type =
9186                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9187         rx_req->in_sec_vlan_type =
9188                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9189
9190         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9191         if (status) {
9192                 dev_err(&hdev->pdev->dev,
9193                         "Send rxvlan protocol type command fail, ret =%d\n",
9194                         status);
9195                 return status;
9196         }
9197
9198         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9199
9200         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9201         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9202         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9203
9204         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9205         if (status)
9206                 dev_err(&hdev->pdev->dev,
9207                         "Send txvlan protocol type command fail, ret =%d\n",
9208                         status);
9209
9210         return status;
9211 }
9212
9213 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9214 {
9215 #define HCLGE_DEF_VLAN_TYPE             0x8100
9216
9217         struct hnae3_handle *handle = &hdev->vport[0].nic;
9218         struct hclge_vport *vport;
9219         int ret;
9220         int i;
9221
9222         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9223                 /* for revision 0x21, vf vlan filter is per function */
9224                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9225                         vport = &hdev->vport[i];
9226                         ret = hclge_set_vlan_filter_ctrl(hdev,
9227                                                          HCLGE_FILTER_TYPE_VF,
9228                                                          HCLGE_FILTER_FE_EGRESS,
9229                                                          true,
9230                                                          vport->vport_id);
9231                         if (ret)
9232                                 return ret;
9233                 }
9234
9235                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9236                                                  HCLGE_FILTER_FE_INGRESS, true,
9237                                                  0);
9238                 if (ret)
9239                         return ret;
9240         } else {
9241                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9242                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9243                                                  true, 0);
9244                 if (ret)
9245                         return ret;
9246         }
9247
9248         handle->netdev_flags |= HNAE3_VLAN_FLTR;
9249
9250         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9251         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9252         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9253         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9254         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9255         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9256
9257         ret = hclge_set_vlan_protocol_type(hdev);
9258         if (ret)
9259                 return ret;
9260
9261         for (i = 0; i < hdev->num_alloc_vport; i++) {
9262                 u16 vlan_tag;
9263
9264                 vport = &hdev->vport[i];
9265                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9266
9267                 ret = hclge_vlan_offload_cfg(vport,
9268                                              vport->port_base_vlan_cfg.state,
9269                                              vlan_tag);
9270                 if (ret)
9271                         return ret;
9272         }
9273
9274         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9275 }
9276
9277 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9278                                        bool written_to_tbl)
9279 {
9280         struct hclge_vport_vlan_cfg *vlan;
9281
9282         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9283         if (!vlan)
9284                 return;
9285
9286         vlan->hd_tbl_status = written_to_tbl;
9287         vlan->vlan_id = vlan_id;
9288
9289         list_add_tail(&vlan->node, &vport->vlan_list);
9290 }
9291
9292 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9293 {
9294         struct hclge_vport_vlan_cfg *vlan, *tmp;
9295         struct hclge_dev *hdev = vport->back;
9296         int ret;
9297
9298         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9299                 if (!vlan->hd_tbl_status) {
9300                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9301                                                        vport->vport_id,
9302                                                        vlan->vlan_id, false);
9303                         if (ret) {
9304                                 dev_err(&hdev->pdev->dev,
9305                                         "restore vport vlan list failed, ret=%d\n",
9306                                         ret);
9307                                 return ret;
9308                         }
9309                 }
9310                 vlan->hd_tbl_status = true;
9311         }
9312
9313         return 0;
9314 }
9315
9316 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9317                                       bool is_write_tbl)
9318 {
9319         struct hclge_vport_vlan_cfg *vlan, *tmp;
9320         struct hclge_dev *hdev = vport->back;
9321
9322         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9323                 if (vlan->vlan_id == vlan_id) {
9324                         if (is_write_tbl && vlan->hd_tbl_status)
9325                                 hclge_set_vlan_filter_hw(hdev,
9326                                                          htons(ETH_P_8021Q),
9327                                                          vport->vport_id,
9328                                                          vlan_id,
9329                                                          true);
9330
9331                         list_del(&vlan->node);
9332                         kfree(vlan);
9333                         break;
9334                 }
9335         }
9336 }
9337
9338 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9339 {
9340         struct hclge_vport_vlan_cfg *vlan, *tmp;
9341         struct hclge_dev *hdev = vport->back;
9342
9343         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9344                 if (vlan->hd_tbl_status)
9345                         hclge_set_vlan_filter_hw(hdev,
9346                                                  htons(ETH_P_8021Q),
9347                                                  vport->vport_id,
9348                                                  vlan->vlan_id,
9349                                                  true);
9350
9351                 vlan->hd_tbl_status = false;
9352                 if (is_del_list) {
9353                         list_del(&vlan->node);
9354                         kfree(vlan);
9355                 }
9356         }
9357         clear_bit(vport->vport_id, hdev->vf_vlan_full);
9358 }
9359
9360 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9361 {
9362         struct hclge_vport_vlan_cfg *vlan, *tmp;
9363         struct hclge_vport *vport;
9364         int i;
9365
9366         for (i = 0; i < hdev->num_alloc_vport; i++) {
9367                 vport = &hdev->vport[i];
9368                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9369                         list_del(&vlan->node);
9370                         kfree(vlan);
9371                 }
9372         }
9373 }
9374
9375 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9376 {
9377         struct hclge_vport_vlan_cfg *vlan, *tmp;
9378         struct hclge_dev *hdev = vport->back;
9379         u16 vlan_proto;
9380         u16 vlan_id;
9381         u16 state;
9382         int ret;
9383
9384         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9385         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9386         state = vport->port_base_vlan_cfg.state;
9387
9388         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9389                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9390                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9391                                          vport->vport_id, vlan_id,
9392                                          false);
9393                 return;
9394         }
9395
9396         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9397                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9398                                                vport->vport_id,
9399                                                vlan->vlan_id, false);
9400                 if (ret)
9401                         break;
9402                 vlan->hd_tbl_status = true;
9403         }
9404 }
9405
9406 /* For global reset and imp reset, hardware will clear the mac table,
9407  * so we change the mac address state from ACTIVE to TO_ADD; they can
9408  * then be restored in the service task after the reset completes.
9409  * Furthermore, the mac addresses with state TO_DEL do not need to be
9410  * restored after reset, so just remove those mac nodes from mac_list.
9411  */
9412 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9413 {
9414         struct hclge_mac_node *mac_node, *tmp;
9415
9416         list_for_each_entry_safe(mac_node, tmp, list, node) {
9417                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9418                         mac_node->state = HCLGE_MAC_TO_ADD;
9419                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9420                         list_del(&mac_node->node);
9421                         kfree(mac_node);
9422                 }
9423         }
9424 }
9425
9426 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9427 {
9428         spin_lock_bh(&vport->mac_list_lock);
9429
9430         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9431         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9432         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9433
9434         spin_unlock_bh(&vport->mac_list_lock);
9435 }
9436
9437 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9438 {
9439         struct hclge_vport *vport = &hdev->vport[0];
9440         struct hnae3_handle *handle = &vport->nic;
9441
9442         hclge_restore_mac_table_common(vport);
9443         hclge_restore_vport_vlan_table(vport);
9444         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9445
9446         hclge_restore_fd_entries(handle);
9447 }
9448
9449 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9450 {
9451         struct hclge_vport *vport = hclge_get_vport(handle);
9452
9453         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9454                 vport->rxvlan_cfg.strip_tag1_en = false;
9455                 vport->rxvlan_cfg.strip_tag2_en = enable;
9456                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9457         } else {
9458                 vport->rxvlan_cfg.strip_tag1_en = enable;
9459                 vport->rxvlan_cfg.strip_tag2_en = true;
9460                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9461         }
9462
9463         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9464         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9465         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9466         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9467
9468         return hclge_set_vlan_rx_offload_cfg(vport);
9469 }
9470
9471 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9472                                             u16 port_base_vlan_state,
9473                                             struct hclge_vlan_info *new_info,
9474                                             struct hclge_vlan_info *old_info)
9475 {
9476         struct hclge_dev *hdev = vport->back;
9477         int ret;
9478
9479         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9480                 hclge_rm_vport_all_vlan_table(vport, false);
9481                 return hclge_set_vlan_filter_hw(hdev,
9482                                                  htons(new_info->vlan_proto),
9483                                                  vport->vport_id,
9484                                                  new_info->vlan_tag,
9485                                                  false);
9486         }
9487
9488         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9489                                        vport->vport_id, old_info->vlan_tag,
9490                                        true);
9491         if (ret)
9492                 return ret;
9493
9494         return hclge_add_vport_all_vlan_table(vport);
9495 }
9496
9497 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9498                                     struct hclge_vlan_info *vlan_info)
9499 {
9500         struct hnae3_handle *nic = &vport->nic;
9501         struct hclge_vlan_info *old_vlan_info;
9502         struct hclge_dev *hdev = vport->back;
9503         int ret;
9504
9505         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9506
9507         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9508         if (ret)
9509                 return ret;
9510
9511         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9512                 /* add new VLAN tag */
9513                 ret = hclge_set_vlan_filter_hw(hdev,
9514                                                htons(vlan_info->vlan_proto),
9515                                                vport->vport_id,
9516                                                vlan_info->vlan_tag,
9517                                                false);
9518                 if (ret)
9519                         return ret;
9520
9521                 /* remove old VLAN tag */
9522                 ret = hclge_set_vlan_filter_hw(hdev,
9523                                                htons(old_vlan_info->vlan_proto),
9524                                                vport->vport_id,
9525                                                old_vlan_info->vlan_tag,
9526                                                true);
9527                 if (ret)
9528                         return ret;
9529
9530                 goto update;
9531         }
9532
9533         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9534                                                old_vlan_info);
9535         if (ret)
9536                 return ret;
9537
9538         /* update state only when disabling/enabling port based VLAN */
9539         vport->port_base_vlan_cfg.state = state;
9540         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9541                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9542         else
9543                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9544
9545 update:
9546         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9547         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9548         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9549
9550         return 0;
9551 }
9552
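/* Decide the port based vlan transition: vlan 0 requests disable, a
 * new non-zero vlan enables or modifies, and an unchanged vlan is a
 * no-op.
 */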
9553 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9554                                           enum hnae3_port_base_vlan_state state,
9555                                           u16 vlan)
9556 {
9557         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9558                 if (!vlan)
9559                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9560                 else
9561                         return HNAE3_PORT_BASE_VLAN_ENABLE;
9562         } else {
9563                 if (!vlan)
9564                         return HNAE3_PORT_BASE_VLAN_DISABLE;
9565                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9566                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9567                 else
9568                         return HNAE3_PORT_BASE_VLAN_MODIFY;
9569         }
9570 }
9571
9572 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9573                                     u16 vlan, u8 qos, __be16 proto)
9574 {
9575         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9576         struct hclge_vport *vport = hclge_get_vport(handle);
9577         struct hclge_dev *hdev = vport->back;
9578         struct hclge_vlan_info vlan_info;
9579         u16 state;
9580         int ret;
9581
9582         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9583                 return -EOPNOTSUPP;
9584
9585         vport = hclge_get_vf_vport(hdev, vfid);
9586         if (!vport)
9587                 return -EINVAL;
9588
9589         /* qos is a 3-bit value, so it cannot be bigger than 7 */
9590         if (vlan > VLAN_N_VID - 1 || qos > 7)
9591                 return -EINVAL;
9592         if (proto != htons(ETH_P_8021Q))
9593                 return -EPROTONOSUPPORT;
9594
9595         state = hclge_get_port_base_vlan_state(vport,
9596                                                vport->port_base_vlan_cfg.state,
9597                                                vlan);
9598         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9599                 return 0;
9600
9601         vlan_info.vlan_tag = vlan;
9602         vlan_info.qos = qos;
9603         vlan_info.vlan_proto = ntohs(proto);
9604
9605         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9606         if (ret) {
9607                 dev_err(&hdev->pdev->dev,
9608                         "failed to update port base vlan for vf %d, ret = %d\n",
9609                         vfid, ret);
9610                 return ret;
9611         }
9612
9613         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9614          * VLAN state.
9615          */
9616         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9617             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9618                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9619                                                   vport->vport_id, state,
9620                                                   vlan, qos,
9621                                                   ntohs(proto));
9622
9623         return 0;
9624 }
9625
9626 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9627 {
9628         struct hclge_vlan_info *vlan_info;
9629         struct hclge_vport *vport;
9630         int ret;
9631         int vf;
9632
9633         /* clear port base vlan for all vf */
9634         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9635                 vport = &hdev->vport[vf];
9636                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9637
9638                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9639                                                vport->vport_id,
9640                                                vlan_info->vlan_tag, true);
9641                 if (ret)
9642                         dev_err(&hdev->pdev->dev,
9643                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9644                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9645         }
9646 }
9647
9648 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9649                           u16 vlan_id, bool is_kill)
9650 {
9651         struct hclge_vport *vport = hclge_get_vport(handle);
9652         struct hclge_dev *hdev = vport->back;
9653         bool written_to_tbl = false;
9654         int ret = 0;
9655
9656         /* When the device is resetting or the reset has failed, firmware is
9657          * unable to handle the mailbox. Just record the vlan id, and remove
9658          * it after the reset finishes.
9659          */
9660         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9661              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9662                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9663                 return -EBUSY;
9664         }
9665
9666         /* when port based vlan is enabled, we use it as the vlan filter
9667          * entry. In this case, we don't update the vlan filter table when
9668          * the user adds a new vlan or removes an existing one; we just
9669          * update the vport vlan list. The vlan ids in the vlan list are not
9670          * written into the vlan filter table until port based vlan is disabled
9671          */
9672         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9673                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9674                                                vlan_id, is_kill);
9675                 written_to_tbl = true;
9676         }
9677
9678         if (!ret) {
9679                 if (is_kill)
9680                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9681                 else
9682                         hclge_add_vport_vlan_table(vport, vlan_id,
9683                                                    written_to_tbl);
9684         } else if (is_kill) {
9685                 /* when removing the hw vlan filter failed, record the vlan
9686                  * id, and try to remove it from hw later, to be consistent
9687                  * with the stack
9688                  */
9689                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9690         }
9691         return ret;
9692 }
9693
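/* Retry the VLAN deletions that previously failed (recorded in each
 * vport's vlan_del_fail_bmap), handling at most HCLGE_MAX_SYNC_COUNT
 * entries per invocation of the periodic service task.
 */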
9694 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9695 {
9696 #define HCLGE_MAX_SYNC_COUNT    60
9697
9698         int i, ret, sync_cnt = 0;
9699         u16 vlan_id;
9700
9701         /* retry the failed vlan removals for each vport, the PF included */
9702         for (i = 0; i < hdev->num_alloc_vport; i++) {
9703                 struct hclge_vport *vport = &hdev->vport[i];
9704
9705                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9706                                          VLAN_N_VID);
9707                 while (vlan_id != VLAN_N_VID) {
9708                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9709                                                        vport->vport_id, vlan_id,
9710                                                        true);
9711                         if (ret && ret != -EINVAL)
9712                                 return;
9713
9714                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9715                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9716
9717                         sync_cnt++;
9718                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9719                                 return;
9720
9721                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9722                                                  VLAN_N_VID);
9723                 }
9724         }
9725 }
9726
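/* Program the maximum and minimum accepted frame sizes into the MAC via
 * the CONFIG_MAX_FRM_SIZE firmware command.
 */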
9727 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9728 {
9729         struct hclge_config_max_frm_size_cmd *req;
9730         struct hclge_desc desc;
9731
9732         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9733
9734         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9735         req->max_frm_size = cpu_to_le16(new_mps);
9736         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9737
9738         return hclge_cmd_send(&hdev->hw, &desc, 1);
9739 }
9740
9741 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9742 {
9743         struct hclge_vport *vport = hclge_get_vport(handle);
9744
9745         return hclge_set_vport_mtu(vport, new_mtu);
9746 }
9747
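/* Convert the requested MTU into a max frame size (Ethernet header, FCS
 * and two VLAN tags added), validate it against the device limits and
 * apply it: a VF vport only records its mps, while the PF reprograms the
 * MAC and reallocates the packet buffers.
 */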
9748 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9749 {
9750         struct hclge_dev *hdev = vport->back;
9751         int i, max_frm_size, ret;
9752
9753         /* HW supports 2 layers of vlan tags */
9754         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9755         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9756             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9757                 return -EINVAL;
9758
9759         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9760         mutex_lock(&hdev->vport_lock);
9761         /* VF's mps must fit within hdev->mps */
9762         if (vport->vport_id && max_frm_size > hdev->mps) {
9763                 mutex_unlock(&hdev->vport_lock);
9764                 return -EINVAL;
9765         } else if (vport->vport_id) {
9766                 vport->mps = max_frm_size;
9767                 mutex_unlock(&hdev->vport_lock);
9768                 return 0;
9769         }
9770
9771         /* PF's mps must be no less than the mps of any VF */
9772         for (i = 1; i < hdev->num_alloc_vport; i++)
9773                 if (max_frm_size < hdev->vport[i].mps) {
9774                         mutex_unlock(&hdev->vport_lock);
9775                         return -EINVAL;
9776                 }
9777
9778         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9779
9780         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9781         if (ret) {
9782                 dev_err(&hdev->pdev->dev,
9783                         "Change mtu fail, ret =%d\n", ret);
9784                 goto out;
9785         }
9786
9787         hdev->mps = max_frm_size;
9788         vport->mps = max_frm_size;
9789
9790         ret = hclge_buffer_alloc(hdev);
9791         if (ret)
9792                 dev_err(&hdev->pdev->dev,
9793                         "Allocate buffer fail, ret =%d\n", ret);
9794
9795 out:
9796         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9797         mutex_unlock(&hdev->vport_lock);
9798         return ret;
9799 }
9800
9801 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9802                                     bool enable)
9803 {
9804         struct hclge_reset_tqp_queue_cmd *req;
9805         struct hclge_desc desc;
9806         int ret;
9807
9808         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9809
9810         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9811         req->tqp_id = cpu_to_le16(queue_id);
9812         if (enable)
9813                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9814
9815         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9816         if (ret) {
9817                 dev_err(&hdev->pdev->dev,
9818                         "Send tqp reset cmd error, status =%d\n", ret);
9819                 return ret;
9820         }
9821
9822         return 0;
9823 }
9824
9825 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9826 {
9827         struct hclge_reset_tqp_queue_cmd *req;
9828         struct hclge_desc desc;
9829         int ret;
9830
9831         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9832
9833         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9834         req->tqp_id = cpu_to_le16(queue_id);
9835
9836         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9837         if (ret) {
9838                 dev_err(&hdev->pdev->dev,
9839                         "Get reset status error, status =%d\n", ret);
9840                 return ret;
9841         }
9842
9843         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9844 }
9845
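/* Convert a queue id local to this handle into the global TQP index used
 * by the hardware reset commands.
 */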
9846 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9847 {
9848         struct hnae3_queue *queue;
9849         struct hclge_tqp *tqp;
9850
9851         queue = handle->kinfo.tqp[queue_id];
9852         tqp = container_of(queue, struct hclge_tqp, q);
9853
9854         return tqp->index;
9855 }
9856
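/* Reset a single TQP: disable it, assert the per-queue soft reset, poll
 * the ready bit up to HCLGE_TQP_RESET_TRY_TIMES times, then deassert the
 * reset.
 */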
9857 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9858 {
9859         struct hclge_vport *vport = hclge_get_vport(handle);
9860         struct hclge_dev *hdev = vport->back;
9861         int reset_try_times = 0;
9862         int reset_status;
9863         u16 queue_gid;
9864         int ret;
9865
9866         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9867
9868         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9869         if (ret) {
9870                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9871                 return ret;
9872         }
9873
9874         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9875         if (ret) {
9876                 dev_err(&hdev->pdev->dev,
9877                         "Send reset tqp cmd fail, ret = %d\n", ret);
9878                 return ret;
9879         }
9880
9881         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9882                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9883                 if (reset_status)
9884                         break;
9885
9886                 /* Wait for tqp hw reset */
9887                 usleep_range(1000, 1200);
9888         }
9889
9890         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9891                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9892                 return -ETIME;
9893         }
9894
9895         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9896         if (ret)
9897                 dev_err(&hdev->pdev->dev,
9898                         "Deassert the soft reset fail, ret = %d\n", ret);
9899
9900         return ret;
9901 }
9902
9903 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9904 {
9905         struct hnae3_handle *handle = &vport->nic;
9906         struct hclge_dev *hdev = vport->back;
9907         int reset_try_times = 0;
9908         int reset_status;
9909         u16 queue_gid;
9910         int ret;
9911
9912         if (queue_id >= handle->kinfo.num_tqps) {
9913                 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9914                          queue_id);
9915                 return;
9916         }
9917
9918         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9919
9920         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9921         if (ret) {
9922                 dev_warn(&hdev->pdev->dev,
9923                          "Send reset tqp cmd fail, ret = %d\n", ret);
9924                 return;
9925         }
9926
9927         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9928                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9929                 if (reset_status)
9930                         break;
9931
9932                 /* Wait for tqp hw reset */
9933                 usleep_range(1000, 1200);
9934         }
9935
9936         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9937                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9938                 return;
9939         }
9940
9941         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9942         if (ret)
9943                 dev_warn(&hdev->pdev->dev,
9944                          "Deassert the soft reset fail, ret = %d\n", ret);
9945 }
9946
9947 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9948 {
9949         struct hclge_vport *vport = hclge_get_vport(handle);
9950         struct hclge_dev *hdev = vport->back;
9951
9952         return hdev->fw_version;
9953 }
9954
9955 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9956 {
9957         struct phy_device *phydev = hdev->hw.mac.phydev;
9958
9959         if (!phydev)
9960                 return;
9961
9962         phy_set_asym_pause(phydev, rx_en, tx_en);
9963 }
9964
9965 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9966 {
9967         int ret;
9968
9969         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9970                 return 0;
9971
9972         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9973         if (ret)
9974                 dev_err(&hdev->pdev->dev,
9975                         "configure pauseparam error, ret = %d.\n", ret);
9976
9977         return ret;
9978 }
9979
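/* Re-resolve the MAC pause settings from the local and remote autoneg
 * advertisements (mii_resolve_flowctrl_fdx) once the PHY link is up;
 * pause is forced off on half duplex links.
 */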
9980 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9981 {
9982         struct phy_device *phydev = hdev->hw.mac.phydev;
9983         u16 remote_advertising = 0;
9984         u16 local_advertising;
9985         u32 rx_pause, tx_pause;
9986         u8 flowctl;
9987
9988         if (!phydev->link || !phydev->autoneg)
9989                 return 0;
9990
9991         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9992
9993         if (phydev->pause)
9994                 remote_advertising = LPA_PAUSE_CAP;
9995
9996         if (phydev->asym_pause)
9997                 remote_advertising |= LPA_PAUSE_ASYM;
9998
9999         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10000                                            remote_advertising);
10001         tx_pause = flowctl & FLOW_CTRL_TX;
10002         rx_pause = flowctl & FLOW_CTRL_RX;
10003
10004         if (phydev->duplex == HCLGE_MAC_HALF) {
10005                 tx_pause = 0;
10006                 rx_pause = 0;
10007         }
10008
10009         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10010 }
10011
10012 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10013                                  u32 *rx_en, u32 *tx_en)
10014 {
10015         struct hclge_vport *vport = hclge_get_vport(handle);
10016         struct hclge_dev *hdev = vport->back;
10017         struct phy_device *phydev = hdev->hw.mac.phydev;
10018
10019         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
10020
10021         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10022                 *rx_en = 0;
10023                 *tx_en = 0;
10024                 return;
10025         }
10026
10027         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10028                 *rx_en = 1;
10029                 *tx_en = 0;
10030         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10031                 *tx_en = 1;
10032                 *rx_en = 0;
10033         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10034                 *rx_en = 1;
10035                 *tx_en = 1;
10036         } else {
10037                 *rx_en = 0;
10038                 *tx_en = 0;
10039         }
10040 }
10041
10042 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10043                                          u32 rx_en, u32 tx_en)
10044 {
10045         if (rx_en && tx_en)
10046                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10047         else if (rx_en && !tx_en)
10048                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10049         else if (!rx_en && tx_en)
10050                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10051         else
10052                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10053
10054         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10055 }
10056
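/* Handler behind ethtool -A: changing the pause autoneg flag here is
 * rejected (ethtool -s must be used instead), as is any change while PFC
 * is active. Otherwise the requested rx/tx pause is recorded, advertised
 * to the PHY, and either applied directly or renegotiated.
 */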
10057 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10058                                 u32 rx_en, u32 tx_en)
10059 {
10060         struct hclge_vport *vport = hclge_get_vport(handle);
10061         struct hclge_dev *hdev = vport->back;
10062         struct phy_device *phydev = hdev->hw.mac.phydev;
10063         u32 fc_autoneg;
10064
10065         if (phydev) {
10066                 fc_autoneg = hclge_get_autoneg(handle);
10067                 if (auto_neg != fc_autoneg) {
10068                         dev_info(&hdev->pdev->dev,
10069                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10070                         return -EOPNOTSUPP;
10071                 }
10072         }
10073
10074         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10075                 dev_info(&hdev->pdev->dev,
10076                          "Priority flow control enabled. Cannot set link flow control.\n");
10077                 return -EOPNOTSUPP;
10078         }
10079
10080         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10081
10082         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10083
10084         if (!auto_neg)
10085                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10086
10087         if (phydev)
10088                 return phy_start_aneg(phydev);
10089
10090         return -EOPNOTSUPP;
10091 }
10092
10093 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10094                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10095 {
10096         struct hclge_vport *vport = hclge_get_vport(handle);
10097         struct hclge_dev *hdev = vport->back;
10098
10099         if (speed)
10100                 *speed = hdev->hw.mac.speed;
10101         if (duplex)
10102                 *duplex = hdev->hw.mac.duplex;
10103         if (auto_neg)
10104                 *auto_neg = hdev->hw.mac.autoneg;
10105 }
10106
10107 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10108                                  u8 *module_type)
10109 {
10110         struct hclge_vport *vport = hclge_get_vport(handle);
10111         struct hclge_dev *hdev = vport->back;
10112
10113         /* When the nic is down, the service task is not running and doesn't
10114          * update the port information every second. Query the port information
10115          * before returning the media type to ensure it is correct.
10116          */
10117         hclge_update_port_info(hdev);
10118
10119         if (media_type)
10120                 *media_type = hdev->hw.mac.media_type;
10121
10122         if (module_type)
10123                 *module_type = hdev->hw.mac.module_type;
10124 }
10125
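/* Read the MDI-X control and status of the copper PHY by temporarily
 * switching to the MDIX register page, and translate them into the
 * ETH_TP_MDI* values reported to ethtool.
 */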
10126 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10127                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10128 {
10129         struct hclge_vport *vport = hclge_get_vport(handle);
10130         struct hclge_dev *hdev = vport->back;
10131         struct phy_device *phydev = hdev->hw.mac.phydev;
10132         int mdix_ctrl, mdix, is_resolved;
10133         unsigned int retval;
10134
10135         if (!phydev) {
10136                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10137                 *tp_mdix = ETH_TP_MDI_INVALID;
10138                 return;
10139         }
10140
10141         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10142
10143         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10144         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10145                                     HCLGE_PHY_MDIX_CTRL_S);
10146
10147         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10148         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10149         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10150
10151         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10152
10153         switch (mdix_ctrl) {
10154         case 0x0:
10155                 *tp_mdix_ctrl = ETH_TP_MDI;
10156                 break;
10157         case 0x1:
10158                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10159                 break;
10160         case 0x3:
10161                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10162                 break;
10163         default:
10164                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10165                 break;
10166         }
10167
10168         if (!is_resolved)
10169                 *tp_mdix = ETH_TP_MDI_INVALID;
10170         else if (mdix)
10171                 *tp_mdix = ETH_TP_MDI_X;
10172         else
10173                 *tp_mdix = ETH_TP_MDI;
10174 }
10175
10176 static void hclge_info_show(struct hclge_dev *hdev)
10177 {
10178         struct device *dev = &hdev->pdev->dev;
10179
10180         dev_info(dev, "PF info begin:\n");
10181
10182         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10183         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10184         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10185         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10186         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10187         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10188         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10189         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10190         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10191         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10192         dev_info(dev, "This is %s PF\n",
10193                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10194         dev_info(dev, "DCB %s\n",
10195                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10196         dev_info(dev, "MQPRIO %s\n",
10197                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10198
10199         dev_info(dev, "PF info end.\n");
10200 }
10201
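/* Bring up the NIC client instance. The reset counter is sampled first so
 * that a reset racing with the init can be detected; in that case the
 * instance is torn down again and -EBUSY is returned.
 */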
10202 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10203                                           struct hclge_vport *vport)
10204 {
10205         struct hnae3_client *client = vport->nic.client;
10206         struct hclge_dev *hdev = ae_dev->priv;
10207         int rst_cnt = hdev->rst_stats.reset_cnt;
10208         int ret;
10209
10210         ret = client->ops->init_instance(&vport->nic);
10211         if (ret)
10212                 return ret;
10213
10214         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10215         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10216             rst_cnt != hdev->rst_stats.reset_cnt) {
10217                 ret = -EBUSY;
10218                 goto init_nic_err;
10219         }
10220
10221         /* Enable nic hw error interrupts */
10222         ret = hclge_config_nic_hw_error(hdev, true);
10223         if (ret) {
10224                 dev_err(&ae_dev->pdev->dev,
10225                         "fail(%d) to enable hw error interrupts\n", ret);
10226                 goto init_nic_err;
10227         }
10228
10229         hnae3_set_client_init_flag(client, ae_dev, 1);
10230
10231         if (netif_msg_drv(&hdev->vport->nic))
10232                 hclge_info_show(hdev);
10233
10234         return ret;
10235
10236 init_nic_err:
10237         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10238         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10239                 msleep(HCLGE_WAIT_RESET_DONE);
10240
10241         client->ops->uninit_instance(&vport->nic, 0);
10242
10243         return ret;
10244 }
10245
10246 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10247                                            struct hclge_vport *vport)
10248 {
10249         struct hclge_dev *hdev = ae_dev->priv;
10250         struct hnae3_client *client;
10251         int rst_cnt;
10252         int ret;
10253
10254         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10255             !hdev->nic_client)
10256                 return 0;
10257
10258         client = hdev->roce_client;
10259         ret = hclge_init_roce_base_info(vport);
10260         if (ret)
10261                 return ret;
10262
10263         rst_cnt = hdev->rst_stats.reset_cnt;
10264         ret = client->ops->init_instance(&vport->roce);
10265         if (ret)
10266                 return ret;
10267
10268         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10269         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10270             rst_cnt != hdev->rst_stats.reset_cnt) {
10271                 ret = -EBUSY;
10272                 goto init_roce_err;
10273         }
10274
10275         /* Enable roce ras interrupts */
10276         ret = hclge_config_rocee_ras_interrupt(hdev, true);
10277         if (ret) {
10278                 dev_err(&ae_dev->pdev->dev,
10279                         "fail(%d) to enable roce ras interrupts\n", ret);
10280                 goto init_roce_err;
10281         }
10282
10283         hnae3_set_client_init_flag(client, ae_dev, 1);
10284
10285         return 0;
10286
10287 init_roce_err:
10288         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10289         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10290                 msleep(HCLGE_WAIT_RESET_DONE);
10291
10292         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10293
10294         return ret;
10295 }
10296
10297 static int hclge_init_client_instance(struct hnae3_client *client,
10298                                       struct hnae3_ae_dev *ae_dev)
10299 {
10300         struct hclge_dev *hdev = ae_dev->priv;
10301         struct hclge_vport *vport;
10302         int i, ret;
10303
10304         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10305                 vport = &hdev->vport[i];
10306
10307                 switch (client->type) {
10308                 case HNAE3_CLIENT_KNIC:
10309                         hdev->nic_client = client;
10310                         vport->nic.client = client;
10311                         ret = hclge_init_nic_client_instance(ae_dev, vport);
10312                         if (ret)
10313                                 goto clear_nic;
10314
10315                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10316                         if (ret)
10317                                 goto clear_roce;
10318
10319                         break;
10320                 case HNAE3_CLIENT_ROCE:
10321                         if (hnae3_dev_roce_supported(hdev)) {
10322                                 hdev->roce_client = client;
10323                                 vport->roce.client = client;
10324                         }
10325
10326                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10327                         if (ret)
10328                                 goto clear_roce;
10329
10330                         break;
10331                 default:
10332                         return -EINVAL;
10333                 }
10334         }
10335
10336         return 0;
10337
10338 clear_nic:
10339         hdev->nic_client = NULL;
10340         vport->nic.client = NULL;
10341         return ret;
10342 clear_roce:
10343         hdev->roce_client = NULL;
10344         vport->roce.client = NULL;
10345         return ret;
10346 }
10347
10348 static void hclge_uninit_client_instance(struct hnae3_client *client,
10349                                          struct hnae3_ae_dev *ae_dev)
10350 {
10351         struct hclge_dev *hdev = ae_dev->priv;
10352         struct hclge_vport *vport;
10353         int i;
10354
10355         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10356                 vport = &hdev->vport[i];
10357                 if (hdev->roce_client) {
10358                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10359                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10360                                 msleep(HCLGE_WAIT_RESET_DONE);
10361
10362                         hdev->roce_client->ops->uninit_instance(&vport->roce,
10363                                                                 0);
10364                         hdev->roce_client = NULL;
10365                         vport->roce.client = NULL;
10366                 }
10367                 if (client->type == HNAE3_CLIENT_ROCE)
10368                         return;
10369                 if (hdev->nic_client && client->ops->uninit_instance) {
10370                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10371                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10372                                 msleep(HCLGE_WAIT_RESET_DONE);
10373
10374                         client->ops->uninit_instance(&vport->nic, 0);
10375                         hdev->nic_client = NULL;
10376                         vport->nic.client = NULL;
10377                 }
10378         }
10379 }
10380
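/* Map the optional device memory BAR (BAR4) write-combined; devices that
 * do not expose this BAR skip the mapping.
 */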
10381 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10382 {
10383 #define HCLGE_MEM_BAR           4
10384
10385         struct pci_dev *pdev = hdev->pdev;
10386         struct hclge_hw *hw = &hdev->hw;
10387
10388         /* if the device does not have device memory, return directly */
10389         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10390                 return 0;
10391
10392         hw->mem_base = devm_ioremap_wc(&pdev->dev,
10393                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
10394                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
10395         if (!hw->mem_base) {
10396                 dev_err(&pdev->dev, "failed to map device memory\n");
10397                 return -EFAULT;
10398         }
10399
10400         return 0;
10401 }
10402
10403 static int hclge_pci_init(struct hclge_dev *hdev)
10404 {
10405         struct pci_dev *pdev = hdev->pdev;
10406         struct hclge_hw *hw;
10407         int ret;
10408
10409         ret = pci_enable_device(pdev);
10410         if (ret) {
10411                 dev_err(&pdev->dev, "failed to enable PCI device\n");
10412                 return ret;
10413         }
10414
10415         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10416         if (ret) {
10417                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10418                 if (ret) {
10419                         dev_err(&pdev->dev,
10420                                 "can't set consistent PCI DMA");
10421                         goto err_disable_device;
10422                 }
10423                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10424         }
10425
10426         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10427         if (ret) {
10428                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10429                 goto err_disable_device;
10430         }
10431
10432         pci_set_master(pdev);
10433         hw = &hdev->hw;
10434         hw->io_base = pcim_iomap(pdev, 2, 0);
10435         if (!hw->io_base) {
10436                 dev_err(&pdev->dev, "Can't map configuration register space\n");
10437                 ret = -ENOMEM;
10438                 goto err_clr_master;
10439         }
10440
10441         ret = hclge_dev_mem_map(hdev);
10442         if (ret)
10443                 goto err_unmap_io_base;
10444
10445         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10446
10447         return 0;
10448
10449 err_unmap_io_base:
10450         pcim_iounmap(pdev, hdev->hw.io_base);
10451 err_clr_master:
10452         pci_clear_master(pdev);
10453         pci_release_regions(pdev);
10454 err_disable_device:
10455         pci_disable_device(pdev);
10456
10457         return ret;
10458 }
10459
10460 static void hclge_pci_uninit(struct hclge_dev *hdev)
10461 {
10462         struct pci_dev *pdev = hdev->pdev;
10463
10464         if (hdev->hw.mem_base)
10465                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10466
10467         pcim_iounmap(pdev, hdev->hw.io_base);
10468         pci_free_irq_vectors(pdev);
10469         pci_clear_master(pdev);
10470         pci_release_mem_regions(pdev);
10471         pci_disable_device(pdev);
10472 }
10473
10474 static void hclge_state_init(struct hclge_dev *hdev)
10475 {
10476         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10477         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10478         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10479         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10480         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10481         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10482         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10483 }
10484
10485 static void hclge_state_uninit(struct hclge_dev *hdev)
10486 {
10487         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10488         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10489
10490         if (hdev->reset_timer.function)
10491                 del_timer_sync(&hdev->reset_timer);
10492         if (hdev->service_task.work.func)
10493                 cancel_delayed_work_sync(&hdev->service_task);
10494 }
10495
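/* Prepare for an FLR: take the reset semaphore and run the reset prepare
 * sequence, retrying with a delay while it fails or another reset is
 * pending, then mask the misc vector until hclge_flr_done() runs.
 */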
10496 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10497 {
10498 #define HCLGE_FLR_RETRY_WAIT_MS 500
10499 #define HCLGE_FLR_RETRY_CNT     5
10500
10501         struct hclge_dev *hdev = ae_dev->priv;
10502         int retry_cnt = 0;
10503         int ret;
10504
10505 retry:
10506         down(&hdev->reset_sem);
10507         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10508         hdev->reset_type = HNAE3_FLR_RESET;
10509         ret = hclge_reset_prepare(hdev);
10510         if (ret || hdev->reset_pending) {
10511                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10512                         ret);
10513                 if (hdev->reset_pending ||
10514                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10515                         dev_err(&hdev->pdev->dev,
10516                                 "reset_pending:0x%lx, retry_cnt:%d\n",
10517                                 hdev->reset_pending, retry_cnt);
10518                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10519                         up(&hdev->reset_sem);
10520                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
10521                         goto retry;
10522                 }
10523         }
10524
10525         /* disable misc vector before FLR done */
10526         hclge_enable_vector(&hdev->misc_vector, false);
10527         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10528         hdev->rst_stats.flr_rst_cnt++;
10529 }
10530
10531 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10532 {
10533         struct hclge_dev *hdev = ae_dev->priv;
10534         int ret;
10535
10536         hclge_enable_vector(&hdev->misc_vector, true);
10537
10538         ret = hclge_reset_rebuild(hdev);
10539         if (ret)
10540                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10541
10542         hdev->reset_type = HNAE3_NONE_RESET;
10543         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10544         up(&hdev->reset_sem);
10545 }
10546
10547 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10548 {
10549         u16 i;
10550
10551         for (i = 0; i < hdev->num_alloc_vport; i++) {
10552                 struct hclge_vport *vport = &hdev->vport[i];
10553                 int ret;
10554
10555                 /* Send cmd to clear VF's FUNC_RST_ING */
10556                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10557                 if (ret)
10558                         dev_warn(&hdev->pdev->dev,
10559                                  "clear vf(%u) rst failed %d!\n",
10560                                  vport->vport_id, ret);
10561         }
10562 }
10563
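/* Main PF initialization path: bring up PCI resources, the command
 * interface, interrupts, vports and the hardware tables, then arm the
 * reset timer and kick off the periodic service task.
 */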
10564 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10565 {
10566         struct pci_dev *pdev = ae_dev->pdev;
10567         struct hclge_dev *hdev;
10568         int ret;
10569
10570         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10571         if (!hdev)
10572                 return -ENOMEM;
10573
10574         hdev->pdev = pdev;
10575         hdev->ae_dev = ae_dev;
10576         hdev->reset_type = HNAE3_NONE_RESET;
10577         hdev->reset_level = HNAE3_FUNC_RESET;
10578         ae_dev->priv = hdev;
10579
10580         /* HW supports 2 layers of vlan tags */
10581         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10582
10583         mutex_init(&hdev->vport_lock);
10584         spin_lock_init(&hdev->fd_rule_lock);
10585         sema_init(&hdev->reset_sem, 1);
10586
10587         ret = hclge_pci_init(hdev);
10588         if (ret)
10589                 goto out;
10590
10591         /* Initialize the firmware command queue */
10592         ret = hclge_cmd_queue_init(hdev);
10593         if (ret)
10594                 goto err_pci_uninit;
10595
10596         /* Initialize the firmware command interface */
10597         ret = hclge_cmd_init(hdev);
10598         if (ret)
10599                 goto err_cmd_uninit;
10600
10601         ret = hclge_get_cap(hdev);
10602         if (ret)
10603                 goto err_cmd_uninit;
10604
10605         ret = hclge_query_dev_specs(hdev);
10606         if (ret) {
10607                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10608                         ret);
10609                 goto err_cmd_uninit;
10610         }
10611
10612         ret = hclge_configure(hdev);
10613         if (ret) {
10614                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10615                 goto err_cmd_uninit;
10616         }
10617
10618         ret = hclge_init_msi(hdev);
10619         if (ret) {
10620                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10621                 goto err_cmd_uninit;
10622         }
10623
10624         ret = hclge_misc_irq_init(hdev);
10625         if (ret)
10626                 goto err_msi_uninit;
10627
10628         ret = hclge_alloc_tqps(hdev);
10629         if (ret) {
10630                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10631                 goto err_msi_irq_uninit;
10632         }
10633
10634         ret = hclge_alloc_vport(hdev);
10635         if (ret)
10636                 goto err_msi_irq_uninit;
10637
10638         ret = hclge_map_tqp(hdev);
10639         if (ret)
10640                 goto err_msi_irq_uninit;
10641
10642         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10643                 ret = hclge_mac_mdio_config(hdev);
10644                 if (ret)
10645                         goto err_msi_irq_uninit;
10646         }
10647
10648         ret = hclge_init_umv_space(hdev);
10649         if (ret)
10650                 goto err_mdiobus_unreg;
10651
10652         ret = hclge_mac_init(hdev);
10653         if (ret) {
10654                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10655                 goto err_mdiobus_unreg;
10656         }
10657
10658         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10659         if (ret) {
10660                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10661                 goto err_mdiobus_unreg;
10662         }
10663
10664         ret = hclge_config_gro(hdev, true);
10665         if (ret)
10666                 goto err_mdiobus_unreg;
10667
10668         ret = hclge_init_vlan_config(hdev);
10669         if (ret) {
10670                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10671                 goto err_mdiobus_unreg;
10672         }
10673
10674         ret = hclge_tm_schd_init(hdev);
10675         if (ret) {
10676                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10677                 goto err_mdiobus_unreg;
10678         }
10679
10680         ret = hclge_rss_init_cfg(hdev);
10681         if (ret) {
10682                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10683                 goto err_mdiobus_unreg;
10684         }
10685
10686         ret = hclge_rss_init_hw(hdev);
10687         if (ret) {
10688                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10689                 goto err_mdiobus_unreg;
10690         }
10691
10692         ret = init_mgr_tbl(hdev);
10693         if (ret) {
10694                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10695                 goto err_mdiobus_unreg;
10696         }
10697
10698         ret = hclge_init_fd_config(hdev);
10699         if (ret) {
10700                 dev_err(&pdev->dev,
10701                         "fd table init fail, ret=%d\n", ret);
10702                 goto err_mdiobus_unreg;
10703         }
10704
10705         INIT_KFIFO(hdev->mac_tnl_log);
10706
10707         hclge_dcb_ops_set(hdev);
10708
10709         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10710         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10711
10712         /* Setup affinity after service timer setup because add_timer_on
10713          * is called in affinity notify.
10714          */
10715         hclge_misc_affinity_setup(hdev);
10716
10717         hclge_clear_all_event_cause(hdev);
10718         hclge_clear_resetting_state(hdev);
10719
10720         /* Log and clear the hw errors that have already occurred */
10721         hclge_handle_all_hns_hw_errors(ae_dev);
10722
10723         /* request a delayed reset for error recovery, since an immediate
10724          * global reset on a PF may disturb the pending init of other PFs
10725          */
10726         if (ae_dev->hw_err_reset_req) {
10727                 enum hnae3_reset_type reset_level;
10728
10729                 reset_level = hclge_get_reset_level(ae_dev,
10730                                                     &ae_dev->hw_err_reset_req);
10731                 hclge_set_def_reset_request(ae_dev, reset_level);
10732                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10733         }
10734
10735         /* Enable MISC vector(vector0) */
10736         hclge_enable_vector(&hdev->misc_vector, true);
10737
10738         hclge_state_init(hdev);
10739         hdev->last_reset_time = jiffies;
10740
10741         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10742                  HCLGE_DRIVER_NAME);
10743
10744         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10745
10746         return 0;
10747
10748 err_mdiobus_unreg:
10749         if (hdev->hw.mac.phydev)
10750                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10751 err_msi_irq_uninit:
10752         hclge_misc_irq_uninit(hdev);
10753 err_msi_uninit:
10754         pci_free_irq_vectors(pdev);
10755 err_cmd_uninit:
10756         hclge_cmd_uninit(hdev);
10757 err_pci_uninit:
10758         pcim_iounmap(pdev, hdev->hw.io_base);
10759         pci_clear_master(pdev);
10760         pci_release_regions(pdev);
10761         pci_disable_device(pdev);
10762 out:
10763         mutex_destroy(&hdev->vport_lock);
10764         return ret;
10765 }
10766
10767 static void hclge_stats_clear(struct hclge_dev *hdev)
10768 {
10769         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10770 }
10771
10772 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10773 {
10774         return hclge_config_switch_param(hdev, vf, enable,
10775                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10776 }
10777
10778 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10779 {
10780         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10781                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10782                                           enable, vf);
10783 }
10784
10785 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10786 {
10787         int ret;
10788
10789         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10790         if (ret) {
10791                 dev_err(&hdev->pdev->dev,
10792                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10793                         vf, enable ? "on" : "off", ret);
10794                 return ret;
10795         }
10796
10797         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10798         if (ret)
10799                 dev_err(&hdev->pdev->dev,
10800                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10801                         vf, enable ? "on" : "off", ret);
10802
10803         return ret;
10804 }
10805
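/* Handler behind ndo_set_vf_spoofchk: toggle the MAC and VLAN anti-spoof
 * checks of a VF (V2+ hardware only), warning when a full VLAN or MAC
 * table may cause the VF's packets to be dropped once the check is on.
 */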
10806 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10807                                  bool enable)
10808 {
10809         struct hclge_vport *vport = hclge_get_vport(handle);
10810         struct hclge_dev *hdev = vport->back;
10811         u32 new_spoofchk = enable ? 1 : 0;
10812         int ret;
10813
10814         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10815                 return -EOPNOTSUPP;
10816
10817         vport = hclge_get_vf_vport(hdev, vf);
10818         if (!vport)
10819                 return -EINVAL;
10820
10821         if (vport->vf_info.spoofchk == new_spoofchk)
10822                 return 0;
10823
10824         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10825                 dev_warn(&hdev->pdev->dev,
10826                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10827                          vf);
10828         else if (enable && hclge_is_umv_space_full(vport, true))
10829                 dev_warn(&hdev->pdev->dev,
10830                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10831                          vf);
10832
10833         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10834         if (ret)
10835                 return ret;
10836
10837         vport->vf_info.spoofchk = new_spoofchk;
10838         return 0;
10839 }
10840
10841 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10842 {
10843         struct hclge_vport *vport = hdev->vport;
10844         int ret;
10845         int i;
10846
10847         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10848                 return 0;
10849
10850         /* resume the vf spoof check state after reset */
10851         for (i = 0; i < hdev->num_alloc_vport; i++) {
10852                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10853                                                vport->vf_info.spoofchk);
10854                 if (ret)
10855                         return ret;
10856
10857                 vport++;
10858         }
10859
10860         return 0;
10861 }
10862
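/* Update a VF's trusted flag; when trust is revoked, promiscuous mode is
 * switched off for that VF and the VF is informed of the change.
 */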
10863 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10864 {
10865         struct hclge_vport *vport = hclge_get_vport(handle);
10866         struct hclge_dev *hdev = vport->back;
10867         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10868         u32 new_trusted = enable ? 1 : 0;
10869         bool en_bc_pmc;
10870         int ret;
10871
10872         vport = hclge_get_vf_vport(hdev, vf);
10873         if (!vport)
10874                 return -EINVAL;
10875
10876         if (vport->vf_info.trusted == new_trusted)
10877                 return 0;
10878
10879         /* Disable promisc mode for VF if it is not trusted any more. */
10880         if (!enable && vport->vf_info.promisc_enable) {
10881                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10882                 ret = hclge_set_vport_promisc_mode(vport, false, false,
10883                                                    en_bc_pmc);
10884                 if (ret)
10885                         return ret;
10886                 vport->vf_info.promisc_enable = 0;
10887                 hclge_inform_vf_promisc_info(vport);
10888         }
10889
10890         vport->vf_info.trusted = new_trusted;
10891
10892         return 0;
10893 }
10894
10895 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10896 {
10897         int ret;
10898         int vf;
10899
10900         /* reset vf rate to default value */
10901         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10902                 struct hclge_vport *vport = &hdev->vport[vf];
10903
10904                 vport->vf_info.max_tx_rate = 0;
10905                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10906                 if (ret)
10907                         dev_err(&hdev->pdev->dev,
10908                                 "vf%d failed to reset to default, ret=%d\n",
10909                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10910         }
10911 }
10912
10913 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
10914                                      int min_tx_rate, int max_tx_rate)
10915 {
10916         if (min_tx_rate != 0 ||
10917             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10918                 dev_err(&hdev->pdev->dev,
10919                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10920                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10921                 return -EINVAL;
10922         }
10923
10924         return 0;
10925 }
10926
10927 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10928                              int min_tx_rate, int max_tx_rate, bool force)
10929 {
10930         struct hclge_vport *vport = hclge_get_vport(handle);
10931         struct hclge_dev *hdev = vport->back;
10932         int ret;
10933
10934         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
10935         if (ret)
10936                 return ret;
10937
10938         vport = hclge_get_vf_vport(hdev, vf);
10939         if (!vport)
10940                 return -EINVAL;
10941
10942         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10943                 return 0;
10944
10945         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10946         if (ret)
10947                 return ret;
10948
10949         vport->vf_info.max_tx_rate = max_tx_rate;
10950
10951         return 0;
10952 }
10953
10954 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10955 {
10956         struct hnae3_handle *handle = &hdev->vport->nic;
10957         struct hclge_vport *vport;
10958         int ret;
10959         int vf;
10960
10961         /* resume the vf max_tx_rate after reset */
10962         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10963                 vport = hclge_get_vf_vport(hdev, vf);
10964                 if (!vport)
10965                         return -EINVAL;
10966
10967                 /* zero means max rate; after reset, the firmware has already
10968                  * set it to max rate, so just continue.
10969                  */
10970                 if (!vport->vf_info.max_tx_rate)
10971                         continue;
10972
10973                 ret = hclge_set_vf_rate(handle, vf, 0,
10974                                         vport->vf_info.max_tx_rate, true);
10975                 if (ret) {
10976                         dev_err(&hdev->pdev->dev,
10977                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10978                                 vf, vport->vf_info.max_tx_rate, ret);
10979                         return ret;
10980                 }
10981         }
10982
10983         return 0;
10984 }
10985
10986 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10987 {
10988         struct hclge_vport *vport = hdev->vport;
10989         int i;
10990
10991         for (i = 0; i < hdev->num_alloc_vport; i++) {
10992                 hclge_vport_stop(vport);
10993                 vport++;
10994         }
10995 }
10996
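/* Rebuild the hardware configuration after a reset. IMP and global resets
 * wipe the hardware VLAN and MAC tables, so their software shadows are
 * cleared as well before everything is reprogrammed.
 */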
10997 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10998 {
10999         struct hclge_dev *hdev = ae_dev->priv;
11000         struct pci_dev *pdev = ae_dev->pdev;
11001         int ret;
11002
11003         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11004
11005         hclge_stats_clear(hdev);
11006         /* NOTE: a pf reset doesn't need to clear or restore the pf and vf
11007          * table entries, so the tables in memory should not be cleaned here.
11008          */
11009         if (hdev->reset_type == HNAE3_IMP_RESET ||
11010             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11011                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11012                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11013                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11014                 hclge_reset_umv_space(hdev);
11015         }
11016
11017         ret = hclge_cmd_init(hdev);
11018         if (ret) {
11019                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11020                 return ret;
11021         }
11022
11023         ret = hclge_map_tqp(hdev);
11024         if (ret) {
11025                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11026                 return ret;
11027         }
11028
11029         ret = hclge_mac_init(hdev);
11030         if (ret) {
11031                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11032                 return ret;
11033         }
11034
11035         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11036         if (ret) {
11037                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11038                 return ret;
11039         }
11040
11041         ret = hclge_config_gro(hdev, true);
11042         if (ret)
11043                 return ret;
11044
11045         ret = hclge_init_vlan_config(hdev);
11046         if (ret) {
11047                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11048                 return ret;
11049         }
11050
11051         ret = hclge_tm_init_hw(hdev, true);
11052         if (ret) {
11053                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11054                 return ret;
11055         }
11056
11057         ret = hclge_rss_init_hw(hdev);
11058         if (ret) {
11059                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11060                 return ret;
11061         }
11062
11063         ret = init_mgr_tbl(hdev);
11064         if (ret) {
11065                 dev_err(&pdev->dev,
11066                         "failed to reinit manager table, ret = %d\n", ret);
11067                 return ret;
11068         }
11069
11070         ret = hclge_init_fd_config(hdev);
11071         if (ret) {
11072                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11073                 return ret;
11074         }
11075
11076         /* Log and clear the hw errors that have already occurred */
11077         hclge_handle_all_hns_hw_errors(ae_dev);
11078
11079         /* Re-enable the hw error interrupts because
11080          * the interrupts get disabled on global reset.
11081          */
11082         ret = hclge_config_nic_hw_error(hdev, true);
11083         if (ret) {
11084                 dev_err(&pdev->dev,
11085                         "fail(%d) to re-enable NIC hw error interrupts\n",
11086                         ret);
11087                 return ret;
11088         }
11089
11090         if (hdev->roce_client) {
11091                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11092                 if (ret) {
11093                         dev_err(&pdev->dev,
11094                                 "fail(%d) to re-enable roce ras interrupts\n",
11095                                 ret);
11096                         return ret;
11097                 }
11098         }
11099
11100         hclge_reset_vport_state(hdev);
11101         ret = hclge_reset_vport_spoofchk(hdev);
11102         if (ret)
11103                 return ret;
11104
11105         ret = hclge_resume_vf_rate(hdev);
11106         if (ret)
11107                 return ret;
11108
11109         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11110                  HCLGE_DRIVER_NAME);
11111
11112         return 0;
11113 }
11114
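/* Tear down everything set up by hclge_init_ae_dev(), disabling the misc
 * vector and all hardware error interrupts before the command interface
 * goes away.
 */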
11115 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11116 {
11117         struct hclge_dev *hdev = ae_dev->priv;
11118         struct hclge_mac *mac = &hdev->hw.mac;
11119
11120         hclge_reset_vf_rate(hdev);
11121         hclge_clear_vf_vlan(hdev);
11122         hclge_misc_affinity_teardown(hdev);
11123         hclge_state_uninit(hdev);
11124         hclge_uninit_mac_table(hdev);
11125
11126         if (mac->phydev)
11127                 mdiobus_unregister(mac->mdio_bus);
11128
11129         /* Disable MISC vector(vector0) */
11130         hclge_enable_vector(&hdev->misc_vector, false);
11131         synchronize_irq(hdev->misc_vector.vector_irq);
11132
11133         /* Disable all hw interrupts */
11134         hclge_config_mac_tnl_int(hdev, false);
11135         hclge_config_nic_hw_error(hdev, false);
11136         hclge_config_rocee_ras_interrupt(hdev, false);
11137
11138         hclge_cmd_uninit(hdev);
11139         hclge_misc_irq_uninit(hdev);
11140         hclge_pci_uninit(hdev);
11141         mutex_destroy(&hdev->vport_lock);
11142         hclge_uninit_vport_vlan_table(hdev);
11143         ae_dev->priv = NULL;
11144 }
11145
11146 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11147 {
11148         struct hclge_vport *vport = hclge_get_vport(handle);
11149         struct hclge_dev *hdev = vport->back;
11150
11151         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11152 }
11153
11154 static void hclge_get_channels(struct hnae3_handle *handle,
11155                                struct ethtool_channels *ch)
11156 {
11157         ch->max_combined = hclge_get_max_channels(handle);
11158         ch->other_count = 1;
11159         ch->max_other = 1;
11160         ch->combined_count = handle->kinfo.rss_size;
11161 }
11162
11163 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11164                                         u16 *alloc_tqps, u16 *max_rss_size)
11165 {
11166         struct hclge_vport *vport = hclge_get_vport(handle);
11167         struct hclge_dev *hdev = vport->back;
11168
11169         *alloc_tqps = vport->alloc_tqps;
11170         *max_rss_size = hdev->pf_rss_size_max;
11171 }
11172
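/* Handler behind ethtool -L: record the requested queue number, remap the
 * vport's TQPs, reprogram the RSS TC mode for the new rss_size, and
 * rebuild the RSS indirection table unless the user configured one.
 */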
11173 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11174                               bool rxfh_configured)
11175 {
11176         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11177         struct hclge_vport *vport = hclge_get_vport(handle);
11178         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11179         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11180         struct hclge_dev *hdev = vport->back;
11181         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11182         u16 cur_rss_size = kinfo->rss_size;
11183         u16 cur_tqps = kinfo->num_tqps;
11184         u16 tc_valid[HCLGE_MAX_TC_NUM];
11185         u16 roundup_size;
11186         u32 *rss_indir;
11187         unsigned int i;
11188         int ret;
11189
11190         kinfo->req_rss_size = new_tqps_num;
11191
11192         ret = hclge_tm_vport_map_update(hdev);
11193         if (ret) {
11194                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
11195                 return ret;
11196         }
11197
11198         roundup_size = roundup_pow_of_two(kinfo->rss_size);
11199         roundup_size = ilog2(roundup_size);
11200         /* Set the RSS TC mode according to the new RSS size */
11201         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11202                 tc_valid[i] = 0;
11203
11204                 if (!(hdev->hw_tc_map & BIT(i)))
11205                         continue;
11206
11207                 tc_valid[i] = 1;
11208                 tc_size[i] = roundup_size;
11209                 tc_offset[i] = kinfo->rss_size * i;
11210         }
11211         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11212         if (ret)
11213                 return ret;
11214
11215         /* RSS indirection table has been configured by user */
11216         if (rxfh_configured)
11217                 goto out;
11218
11219         /* Reinitialize the RSS indirection table for the new RSS size */
11220         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11221                             GFP_KERNEL);
11222         if (!rss_indir)
11223                 return -ENOMEM;
11224
11225         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11226                 rss_indir[i] = i % kinfo->rss_size;
11227
11228         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11229         if (ret)
11230                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11231                         ret);
11232
11233         kfree(rss_indir);
11234
11235 out:
11236         if (!ret)
11237                 dev_info(&hdev->pdev->dev,
11238                  "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
11239                          cur_rss_size, kinfo->rss_size,
11240                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11241
11242         return ret;
11243 }
11244
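/* Query firmware for how many 32 bit and 64 bit registers the
 * register-dump commands will return.
 */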
11245 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11246                               u32 *regs_num_64_bit)
11247 {
11248         struct hclge_desc desc;
11249         u32 total_num;
11250         int ret;
11251
11252         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11253         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11254         if (ret) {
11255                 dev_err(&hdev->pdev->dev,
11256                         "Query register number cmd failed, ret = %d.\n", ret);
11257                 return ret;
11258         }
11259
11260         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11261         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11262
11263         total_num = *regs_num_32_bit + *regs_num_64_bit;
11264         if (!total_num)
11265                 return -EINVAL;
11266
11267         return 0;
11268 }
11269
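/* Read regs_num 32 bit registers with one multi-BD query command. The
 * first BD loses HCLGE_32_BIT_DESC_NODATA_LEN entries to command
 * metadata; for the following BDs the whole descriptor, header
 * included, carries register data.
 */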
11270 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11271                                  void *data)
11272 {
11273 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11274 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11275
11276         struct hclge_desc *desc;
11277         u32 *reg_val = data;
11278         __le32 *desc_data;
11279         int nodata_num;
11280         int cmd_num;
11281         int i, k, n;
11282         int ret;
11283
11284         if (regs_num == 0)
11285                 return 0;
11286
11287         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11288         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11289                                HCLGE_32_BIT_REG_RTN_DATANUM);
11290         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11291         if (!desc)
11292                 return -ENOMEM;
11293
11294         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11295         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11296         if (ret) {
11297                 dev_err(&hdev->pdev->dev,
11298                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
11299                 kfree(desc);
11300                 return ret;
11301         }
11302
11303         for (i = 0; i < cmd_num; i++) {
11304                 if (i == 0) {
11305                         desc_data = (__le32 *)(&desc[i].data[0]);
11306                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11307                 } else {
11308                         desc_data = (__le32 *)(&desc[i]);
11309                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
11310                 }
11311                 for (k = 0; k < n; k++) {
11312                         *reg_val++ = le32_to_cpu(*desc_data++);
11313
11314                         regs_num--;
11315                         if (!regs_num)
11316                                 break;
11317                 }
11318         }
11319
11320         kfree(desc);
11321         return 0;
11322 }
11323
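/* Read regs_num 64 bit registers; same layout as the 32 bit variant,
 * with HCLGE_64_BIT_REG_RTN_DATANUM values per BD and one metadata
 * entry lost in the first BD.
 */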
11324 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11325                                  void *data)
11326 {
11327 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11328 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11329
11330         struct hclge_desc *desc;
11331         u64 *reg_val = data;
11332         __le64 *desc_data;
11333         int nodata_len;
11334         int cmd_num;
11335         int i, k, n;
11336         int ret;
11337
11338         if (regs_num == 0)
11339                 return 0;
11340
11341         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11342         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11343                                HCLGE_64_BIT_REG_RTN_DATANUM);
11344         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11345         if (!desc)
11346                 return -ENOMEM;
11347
11348         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11349         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11350         if (ret) {
11351                 dev_err(&hdev->pdev->dev,
11352                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
11353                 kfree(desc);
11354                 return ret;
11355         }
11356
11357         for (i = 0; i < cmd_num; i++) {
11358                 if (i == 0) {
11359                         desc_data = (__le64 *)(&desc[i].data[0]);
11360                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11361                 } else {
11362                         desc_data = (__le64 *)(&desc[i]);
11363                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
11364                 }
11365                 for (k = 0; k < n; k++) {
11366                         *reg_val++ = le64_to_cpu(*desc_data++);
11367
11368                         regs_num--;
11369                         if (!regs_num)
11370                                 break;
11371                 }
11372         }
11373
11374         kfree(desc);
11375         return 0;
11376 }
11377
11378 #define MAX_SEPARATE_NUM        4
11379 #define SEPARATOR_VALUE         0xFDFCFBFA
11380 #define REG_NUM_PER_LINE        4
11381 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
11382 #define REG_SEPARATOR_LINE      1
11383 #define REG_NUM_REMAIN_MASK     3
11384 #define BD_LIST_MAX_NUM         30
11385
11386 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11387 {
11388         int i;
11389
11390         /* initialize command BDs except the last one */
11391         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11392                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11393                                            true);
11394                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11395         }
11396
11397         /* initialize the last command BD */
11398         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11399
11400         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11401 }
11402
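/* Send the DFX BD-number query and extract, for each register type,
 * the number of BDs it needs, indexed via hclge_dfx_bd_offset_list.
 */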
11403 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11404                                     int *bd_num_list,
11405                                     u32 type_num)
11406 {
11407         u32 entries_per_desc, desc_index, index, offset, i;
11408         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11409         int ret;
11410
11411         ret = hclge_query_bd_num_cmd_send(hdev, desc);
11412         if (ret) {
11413                 dev_err(&hdev->pdev->dev,
11414                         "Get dfx bd num fail, status is %d.\n", ret);
11415                 return ret;
11416         }
11417
11418         entries_per_desc = ARRAY_SIZE(desc[0].data);
11419         for (i = 0; i < type_num; i++) {
11420                 offset = hclge_dfx_bd_offset_list[i];
11421                 index = offset % entries_per_desc;
11422                 desc_index = offset / entries_per_desc;
11423                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11424         }
11425
11426         return ret;
11427 }
11428
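/* Chain bd_num descriptors with the NEXT flag and send one DFX
 * register query command.
 */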
11429 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11430                                   struct hclge_desc *desc_src, int bd_num,
11431                                   enum hclge_opcode_type cmd)
11432 {
11433         struct hclge_desc *desc = desc_src;
11434         int i, ret;
11435
11436         hclge_cmd_setup_basic_desc(desc, cmd, true);
11437         for (i = 0; i < bd_num - 1; i++) {
11438                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11439                 desc++;
11440                 hclge_cmd_setup_basic_desc(desc, cmd, true);
11441         }
11442
11443         desc = desc_src;
11444         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11445         if (ret)
11446                 dev_err(&hdev->pdev->dev,
11447                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11448                         cmd, ret);
11449
11450         return ret;
11451 }
11452
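/* Copy register values out of the descriptors and append
 * SEPARATOR_VALUE padding words; returns the number of u32 words
 * written.
 */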
11453 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11454                                     void *data)
11455 {
11456         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11457         struct hclge_desc *desc = desc_src;
11458         u32 *reg = data;
11459
11460         entries_per_desc = ARRAY_SIZE(desc->data);
11461         reg_num = entries_per_desc * bd_num;
11462         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11463         for (i = 0; i < reg_num; i++) {
11464                 index = i % entries_per_desc;
11465                 desc_index = i / entries_per_desc;
11466                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11467         }
11468         for (i = 0; i < separator_num; i++)
11469                 *reg++ = SEPARATOR_VALUE;
11470
11471         return reg_num + separator_num;
11472 }
11473
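/* Compute the buffer length needed for all DFX register types, each
 * rounded up to whole REG_LEN_PER_LINE lines including separators.
 */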
11474 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11475 {
11476         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11477         int data_len_per_desc, bd_num, i;
11478         int bd_num_list[BD_LIST_MAX_NUM];
11479         u32 data_len;
11480         int ret;
11481
11482         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11483         if (ret) {
11484                 dev_err(&hdev->pdev->dev,
11485                         "Get dfx reg bd num fail, status is %d.\n", ret);
11486                 return ret;
11487         }
11488
11489         data_len_per_desc = sizeof_field(struct hclge_desc, data);
11490         *len = 0;
11491         for (i = 0; i < dfx_reg_type_num; i++) {
11492                 bd_num = bd_num_list[i];
11493                 data_len = data_len_per_desc * bd_num;
11494                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11495         }
11496
11497         return ret;
11498 }
11499
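/* Read every DFX register type into data, reusing one descriptor
 * buffer sized for the largest BD count.
 */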
11500 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11501 {
11502         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11503         int bd_num, bd_num_max, buf_len, i;
11504         int bd_num_list[BD_LIST_MAX_NUM];
11505         struct hclge_desc *desc_src;
11506         u32 *reg = data;
11507         int ret;
11508
11509         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11510         if (ret) {
11511                 dev_err(&hdev->pdev->dev,
11512                         "Get dfx reg bd num fail, status is %d.\n", ret);
11513                 return ret;
11514         }
11515
11516         bd_num_max = bd_num_list[0];
11517         for (i = 1; i < dfx_reg_type_num; i++)
11518                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11519
11520         buf_len = sizeof(*desc_src) * bd_num_max;
11521         desc_src = kzalloc(buf_len, GFP_KERNEL);
11522         if (!desc_src)
11523                 return -ENOMEM;
11524
11525         for (i = 0; i < dfx_reg_type_num; i++) {
11526                 bd_num = bd_num_list[i];
11527                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11528                                              hclge_dfx_reg_opcode_list[i]);
11529                 if (ret) {
11530                         dev_err(&hdev->pdev->dev,
11531                                 "Get dfx reg fail, status is %d.\n", ret);
11532                         break;
11533                 }
11534
11535                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11536         }
11537
11538         kfree(desc_src);
11539         return ret;
11540 }
11541
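/* Read the cmdq, common, per-ring and per-vector register groups
 * directly from PCIe space, appending separator words after each
 * group; returns the number of u32 words written.
 */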
11542 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11543                               struct hnae3_knic_private_info *kinfo)
11544 {
11545 #define HCLGE_RING_REG_OFFSET           0x200
11546 #define HCLGE_RING_INT_REG_OFFSET       0x4
11547
11548         int i, j, reg_num, separator_num;
11549         int data_num_sum;
11550         u32 *reg = data;
11551
11552         /* fetch per-PF register values from PF PCIe register space */
11553         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11554         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11555         for (i = 0; i < reg_num; i++)
11556                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11557         for (i = 0; i < separator_num; i++)
11558                 *reg++ = SEPARATOR_VALUE;
11559         data_num_sum = reg_num + separator_num;
11560
11561         reg_num = ARRAY_SIZE(common_reg_addr_list);
11562         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11563         for (i = 0; i < reg_num; i++)
11564                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11565         for (i = 0; i < separator_num; i++)
11566                 *reg++ = SEPARATOR_VALUE;
11567         data_num_sum += reg_num + separator_num;
11568
11569         reg_num = ARRAY_SIZE(ring_reg_addr_list);
11570         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11571         for (j = 0; j < kinfo->num_tqps; j++) {
11572                 for (i = 0; i < reg_num; i++)
11573                         *reg++ = hclge_read_dev(&hdev->hw,
11574                                                 ring_reg_addr_list[i] +
11575                                                 HCLGE_RING_REG_OFFSET * j);
11576                 for (i = 0; i < separator_num; i++)
11577                         *reg++ = SEPARATOR_VALUE;
11578         }
11579         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11580
11581         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11582         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11583         for (j = 0; j < hdev->num_msi_used - 1; j++) {
11584                 for (i = 0; i < reg_num; i++)
11585                         *reg++ = hclge_read_dev(&hdev->hw,
11586                                                 tqp_intr_reg_addr_list[i] +
11587                                                 HCLGE_RING_INT_REG_OFFSET * j);
11588                 for (i = 0; i < separator_num; i++)
11589                         *reg++ = SEPARATOR_VALUE;
11590         }
11591         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11592
11593         return data_num_sum;
11594 }
11595
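/* Total size in bytes of the ethtool register dump: the directly read
 * groups plus the 32 bit, 64 bit and DFX query registers, each padded
 * to whole REG_LEN_PER_LINE lines.
 */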
11596 static int hclge_get_regs_len(struct hnae3_handle *handle)
11597 {
11598         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11599         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11600         struct hclge_vport *vport = hclge_get_vport(handle);
11601         struct hclge_dev *hdev = vport->back;
11602         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11603         int regs_lines_32_bit, regs_lines_64_bit;
11604         int ret;
11605
11606         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11607         if (ret) {
11608                 dev_err(&hdev->pdev->dev,
11609                         "Get register number failed, ret = %d.\n", ret);
11610                 return ret;
11611         }
11612
11613         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11614         if (ret) {
11615                 dev_err(&hdev->pdev->dev,
11616                         "Get dfx reg len failed, ret = %d.\n", ret);
11617                 return ret;
11618         }
11619
11620         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11621                 REG_SEPARATOR_LINE;
11622         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11623                 REG_SEPARATOR_LINE;
11624         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11625                 REG_SEPARATOR_LINE;
11626         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11627                 REG_SEPARATOR_LINE;
11628         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11629                 REG_SEPARATOR_LINE;
11630         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11631                 REG_SEPARATOR_LINE;
11632
11633         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11634                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11635                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11636 }
11637
11638 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11639                            void *data)
11640 {
11641         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11642         struct hclge_vport *vport = hclge_get_vport(handle);
11643         struct hclge_dev *hdev = vport->back;
11644         u32 regs_num_32_bit, regs_num_64_bit;
11645         int i, reg_num, separator_num, ret;
11646         u32 *reg = data;
11647
11648         *version = hdev->fw_version;
11649
11650         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11651         if (ret) {
11652                 dev_err(&hdev->pdev->dev,
11653                         "Get register number failed, ret = %d.\n", ret);
11654                 return;
11655         }
11656
11657         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11658
11659         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11660         if (ret) {
11661                 dev_err(&hdev->pdev->dev,
11662                         "Get 32 bit register failed, ret = %d.\n", ret);
11663                 return;
11664         }
11665         reg_num = regs_num_32_bit;
11666         reg += reg_num;
11667         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11668         for (i = 0; i < separator_num; i++)
11669                 *reg++ = SEPARATOR_VALUE;
11670
11671         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11672         if (ret) {
11673                 dev_err(&hdev->pdev->dev,
11674                         "Get 64 bit register failed, ret = %d.\n", ret);
11675                 return;
11676         }
11677         reg_num = regs_num_64_bit * 2;
11678         reg += reg_num;
11679         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11680         for (i = 0; i < separator_num; i++)
11681                 *reg++ = SEPARATOR_VALUE;
11682
11683         ret = hclge_get_dfx_reg(hdev, reg);
11684         if (ret)
11685                 dev_err(&hdev->pdev->dev,
11686                         "Get dfx register failed, ret = %d.\n", ret);
11687 }
11688
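/* Program the locate LED state via the LED status config command. */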
11689 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11690 {
11691         struct hclge_set_led_state_cmd *req;
11692         struct hclge_desc desc;
11693         int ret;
11694
11695         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11696
11697         req = (struct hclge_set_led_state_cmd *)desc.data;
11698         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11699                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11700
11701         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11702         if (ret)
11703                 dev_err(&hdev->pdev->dev,
11704                         "Send set led state cmd error, ret = %d\n", ret);
11705
11706         return ret;
11707 }
11708
11709 enum hclge_led_status {
11710         HCLGE_LED_OFF,
11711         HCLGE_LED_ON,
11712         HCLGE_LED_NO_CHANGE = 0xFF,
11713 };
11714
11715 static int hclge_set_led_id(struct hnae3_handle *handle,
11716                             enum ethtool_phys_id_state status)
11717 {
11718         struct hclge_vport *vport = hclge_get_vport(handle);
11719         struct hclge_dev *hdev = vport->back;
11720
11721         switch (status) {
11722         case ETHTOOL_ID_ACTIVE:
11723                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11724         case ETHTOOL_ID_INACTIVE:
11725                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11726         default:
11727                 return -EINVAL;
11728         }
11729 }
11730
11731 static void hclge_get_link_mode(struct hnae3_handle *handle,
11732                                 unsigned long *supported,
11733                                 unsigned long *advertising)
11734 {
11735         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11736         struct hclge_vport *vport = hclge_get_vport(handle);
11737         struct hclge_dev *hdev = vport->back;
11738         unsigned int idx = 0;
11739
11740         for (; idx < size; idx++) {
11741                 supported[idx] = hdev->hw.mac.supported[idx];
11742                 advertising[idx] = hdev->hw.mac.advertising[idx];
11743         }
11744 }
11745
11746 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11747 {
11748         struct hclge_vport *vport = hclge_get_vport(handle);
11749         struct hclge_dev *hdev = vport->back;
11750
11751         return hclge_config_gro(hdev, enable);
11752 }
11753
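/* Reapply the PF's promiscuous configuration when the overflow promisc
 * flags have changed, and refresh VLAN filtering to match.
 */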
11754 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11755 {
11756         struct hclge_vport *vport = &hdev->vport[0];
11757         struct hnae3_handle *handle = &vport->nic;
11758         u8 tmp_flags;
11759         int ret;
11760
11761         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11762                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11763                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11764         }
11765
11766         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11767                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11768                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11769                                              tmp_flags & HNAE3_MPE);
11770                 if (!ret) {
11771                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11772                         hclge_enable_vlan_filter(handle,
11773                                                  tmp_flags & HNAE3_VLAN_FLTR);
11774                 }
11775         }
11776 }
11777
11778 static bool hclge_module_existed(struct hclge_dev *hdev)
11779 {
11780         struct hclge_desc desc;
11781         u32 existed;
11782         int ret;
11783
11784         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11785         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11786         if (ret) {
11787                 dev_err(&hdev->pdev->dev,
11788                         "failed to get SFP exist state, ret = %d\n", ret);
11789                 return false;
11790         }
11791
11792         existed = le32_to_cpu(desc.data[0]);
11793
11794         return existed != 0;
11795 }
11796
11797 /* need 6 BDs (total 140 bytes) in one read operation,
11798  * return the number of bytes actually read; 0 means the read failed.
11799  */
11800 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11801                                      u32 len, u8 *data)
11802 {
11803         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11804         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11805         u16 read_len;
11806         u16 copy_len;
11807         int ret;
11808         int i;
11809
11810         /* setup all 6 bds to read module eeprom info. */
11811         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11812                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11813                                            true);
11814
11815                 /* bd0~bd4 need next flag */
11816                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11817                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11818         }
11819
11820         /* setup bd0, this bd contains offset and read length. */
11821         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11822         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11823         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11824         sfp_info_bd0->read_len = cpu_to_le16(read_len);
11825
11826         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_SFP_INFO_CMD_NUM);
11827         if (ret) {
11828                 dev_err(&hdev->pdev->dev,
11829                         "failed to get SFP eeprom info, ret = %d\n", ret);
11830                 return 0;
11831         }
11832
11833         /* copy sfp info from bd0 to out buffer. */
11834         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11835         memcpy(data, sfp_info_bd0->data, copy_len);
11836         read_len = copy_len;
11837
11838         /* copy sfp info from bd1~bd5 to out buffer if needed. */
11839         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11840                 if (read_len >= len)
11841                         return read_len;
11842
11843                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11844                 memcpy(data + read_len, desc[i].data, copy_len);
11845                 read_len += copy_len;
11846         }
11847
11848         return read_len;
11849 }
11850
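/* ethtool module EEPROM read: loop over hclge_get_sfp_eeprom_info(),
 * which returns at most HCLGE_SFP_INFO_MAX_LEN bytes per firmware
 * command, until len bytes have been gathered.
 */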
11851 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11852                                    u32 len, u8 *data)
11853 {
11854         struct hclge_vport *vport = hclge_get_vport(handle);
11855         struct hclge_dev *hdev = vport->back;
11856         u32 read_len = 0;
11857         u16 data_len;
11858
11859         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11860                 return -EOPNOTSUPP;
11861
11862         if (!hclge_module_existed(hdev))
11863                 return -ENXIO;
11864
11865         while (read_len < len) {
11866                 data_len = hclge_get_sfp_eeprom_info(hdev,
11867                                                      offset + read_len,
11868                                                      len - read_len,
11869                                                      data + read_len);
11870                 if (!data_len)
11871                         return -EIO;
11872
11873                 read_len += data_len;
11874         }
11875
11876         return 0;
11877 }
11878
11879 static const struct hnae3_ae_ops hclge_ops = {
11880         .init_ae_dev = hclge_init_ae_dev,
11881         .uninit_ae_dev = hclge_uninit_ae_dev,
11882         .flr_prepare = hclge_flr_prepare,
11883         .flr_done = hclge_flr_done,
11884         .init_client_instance = hclge_init_client_instance,
11885         .uninit_client_instance = hclge_uninit_client_instance,
11886         .map_ring_to_vector = hclge_map_ring_to_vector,
11887         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11888         .get_vector = hclge_get_vector,
11889         .put_vector = hclge_put_vector,
11890         .set_promisc_mode = hclge_set_promisc_mode,
11891         .request_update_promisc_mode = hclge_request_update_promisc_mode,
11892         .set_loopback = hclge_set_loopback,
11893         .start = hclge_ae_start,
11894         .stop = hclge_ae_stop,
11895         .client_start = hclge_client_start,
11896         .client_stop = hclge_client_stop,
11897         .get_status = hclge_get_status,
11898         .get_ksettings_an_result = hclge_get_ksettings_an_result,
11899         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11900         .get_media_type = hclge_get_media_type,
11901         .check_port_speed = hclge_check_port_speed,
11902         .get_fec = hclge_get_fec,
11903         .set_fec = hclge_set_fec,
11904         .get_rss_key_size = hclge_get_rss_key_size,
11905         .get_rss = hclge_get_rss,
11906         .set_rss = hclge_set_rss,
11907         .set_rss_tuple = hclge_set_rss_tuple,
11908         .get_rss_tuple = hclge_get_rss_tuple,
11909         .get_tc_size = hclge_get_tc_size,
11910         .get_mac_addr = hclge_get_mac_addr,
11911         .set_mac_addr = hclge_set_mac_addr,
11912         .do_ioctl = hclge_do_ioctl,
11913         .add_uc_addr = hclge_add_uc_addr,
11914         .rm_uc_addr = hclge_rm_uc_addr,
11915         .add_mc_addr = hclge_add_mc_addr,
11916         .rm_mc_addr = hclge_rm_mc_addr,
11917         .set_autoneg = hclge_set_autoneg,
11918         .get_autoneg = hclge_get_autoneg,
11919         .restart_autoneg = hclge_restart_autoneg,
11920         .halt_autoneg = hclge_halt_autoneg,
11921         .get_pauseparam = hclge_get_pauseparam,
11922         .set_pauseparam = hclge_set_pauseparam,
11923         .set_mtu = hclge_set_mtu,
11924         .reset_queue = hclge_reset_tqp,
11925         .get_stats = hclge_get_stats,
11926         .get_mac_stats = hclge_get_mac_stat,
11927         .update_stats = hclge_update_stats,
11928         .get_strings = hclge_get_strings,
11929         .get_sset_count = hclge_get_sset_count,
11930         .get_fw_version = hclge_get_fw_version,
11931         .get_mdix_mode = hclge_get_mdix_mode,
11932         .enable_vlan_filter = hclge_enable_vlan_filter,
11933         .set_vlan_filter = hclge_set_vlan_filter,
11934         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11935         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11936         .reset_event = hclge_reset_event,
11937         .get_reset_level = hclge_get_reset_level,
11938         .set_default_reset_request = hclge_set_def_reset_request,
11939         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11940         .set_channels = hclge_set_channels,
11941         .get_channels = hclge_get_channels,
11942         .get_regs_len = hclge_get_regs_len,
11943         .get_regs = hclge_get_regs,
11944         .set_led_id = hclge_set_led_id,
11945         .get_link_mode = hclge_get_link_mode,
11946         .add_fd_entry = hclge_add_fd_entry,
11947         .del_fd_entry = hclge_del_fd_entry,
11948         .del_all_fd_entries = hclge_del_all_fd_entries,
11949         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11950         .get_fd_rule_info = hclge_get_fd_rule_info,
11951         .get_fd_all_rules = hclge_get_all_rules,
11952         .enable_fd = hclge_enable_fd,
11953         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11954         .dbg_run_cmd = hclge_dbg_run_cmd,
11955         .dbg_read_cmd = hclge_dbg_read_cmd,
11956         .handle_hw_ras_error = hclge_handle_hw_ras_error,
11957         .get_hw_reset_stat = hclge_get_hw_reset_stat,
11958         .ae_dev_resetting = hclge_ae_dev_resetting,
11959         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11960         .set_gro_en = hclge_gro_en,
11961         .get_global_queue_id = hclge_covert_handle_qid_global,
11962         .set_timer_task = hclge_set_timer_task,
11963         .mac_connect_phy = hclge_mac_connect_phy,
11964         .mac_disconnect_phy = hclge_mac_disconnect_phy,
11965         .get_vf_config = hclge_get_vf_config,
11966         .set_vf_link_state = hclge_set_vf_link_state,
11967         .set_vf_spoofchk = hclge_set_vf_spoofchk,
11968         .set_vf_trust = hclge_set_vf_trust,
11969         .set_vf_rate = hclge_set_vf_rate,
11970         .set_vf_mac = hclge_set_vf_mac,
11971         .get_module_eeprom = hclge_get_module_eeprom,
11972         .get_cmdq_stat = hclge_get_cmdq_stat,
11973         .add_cls_flower = hclge_add_cls_flower,
11974         .del_cls_flower = hclge_del_cls_flower,
11975         .cls_flower_active = hclge_is_cls_flower_active,
11976 };
11977
11978 static struct hnae3_ae_algo ae_algo = {
11979         .ops = &hclge_ops,
11980         .pdev_id_table = ae_algo_pci_tbl,
11981 };
11982
11983 static int hclge_init(void)
11984 {
11985         pr_info("%s is initializing\n", HCLGE_NAME);
11986
11987         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11988         if (!hclge_wq) {
11989                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11990                 return -ENOMEM;
11991         }
11992
11993         hnae3_register_ae_algo(&ae_algo);
11994
11995         return 0;
11996 }
11997
11998 static void hclge_exit(void)
11999 {
12000         hnae3_unregister_ae_algo(&ae_algo);
12001         destroy_workqueue(hclge_wq);
12002 }
12003 module_init(hclge_init);
12004 module_exit(hclge_exit);
12005
12006 MODULE_LICENSE("GPL");
12007 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12008 MODULE_DESCRIPTION("HCLGE Driver");
12009 MODULE_VERSION(HCLGE_MOD_VERSION);