net: hns3: use FEC capability queried from firmware
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26
27 #define HCLGE_NAME                      "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30
31 #define HCLGE_BUF_SIZE_UNIT     256U
32 #define HCLGE_BUF_MUL_BY        2
33 #define HCLGE_BUF_DIV_BY        2
34 #define NEED_RESERVE_TC_NUM     2
35 #define BUF_MAX_PERCENT         100
36 #define BUF_RESERVE_PERCENT     90
37
38 #define HCLGE_RESET_MAX_FAIL_CNT        5
39 #define HCLGE_RESET_SYNC_TIME           100
40 #define HCLGE_PF_RESET_SYNC_TIME        20
41 #define HCLGE_PF_RESET_SYNC_CNT         1500
42
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56
57 #define HCLGE_LINK_STATUS_MS    10
58
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67                                                    unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73
74 static struct hnae3_ae_algo ae_algo;
75
76 static struct workqueue_struct *hclge_wq;
77
78 static const struct pci_device_id ae_algo_pci_tbl[] = {
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
87         /* required last entry */
88         {0, }
89 };
90
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94                                          HCLGE_CMDQ_TX_ADDR_H_REG,
95                                          HCLGE_CMDQ_TX_DEPTH_REG,
96                                          HCLGE_CMDQ_TX_TAIL_REG,
97                                          HCLGE_CMDQ_TX_HEAD_REG,
98                                          HCLGE_CMDQ_RX_ADDR_L_REG,
99                                          HCLGE_CMDQ_RX_ADDR_H_REG,
100                                          HCLGE_CMDQ_RX_DEPTH_REG,
101                                          HCLGE_CMDQ_RX_TAIL_REG,
102                                          HCLGE_CMDQ_RX_HEAD_REG,
103                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
104                                          HCLGE_CMDQ_INTR_STS_REG,
105                                          HCLGE_CMDQ_INTR_EN_REG,
106                                          HCLGE_CMDQ_INTR_GEN_REG};
107
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109                                            HCLGE_VECTOR0_OTER_EN_REG,
110                                            HCLGE_MISC_RESET_STS_REG,
111                                            HCLGE_MISC_VECTOR_INT_STS,
112                                            HCLGE_GLOBAL_RESET_REG,
113                                            HCLGE_FUN_RST_ING,
114                                            HCLGE_GRO_EN_REG};
115
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117                                          HCLGE_RING_RX_ADDR_H_REG,
118                                          HCLGE_RING_RX_BD_NUM_REG,
119                                          HCLGE_RING_RX_BD_LENGTH_REG,
120                                          HCLGE_RING_RX_MERGE_EN_REG,
121                                          HCLGE_RING_RX_TAIL_REG,
122                                          HCLGE_RING_RX_HEAD_REG,
123                                          HCLGE_RING_RX_FBD_NUM_REG,
124                                          HCLGE_RING_RX_OFFSET_REG,
125                                          HCLGE_RING_RX_FBD_OFFSET_REG,
126                                          HCLGE_RING_RX_STASH_REG,
127                                          HCLGE_RING_RX_BD_ERR_REG,
128                                          HCLGE_RING_TX_ADDR_L_REG,
129                                          HCLGE_RING_TX_ADDR_H_REG,
130                                          HCLGE_RING_TX_BD_NUM_REG,
131                                          HCLGE_RING_TX_PRIORITY_REG,
132                                          HCLGE_RING_TX_TC_REG,
133                                          HCLGE_RING_TX_MERGE_EN_REG,
134                                          HCLGE_RING_TX_TAIL_REG,
135                                          HCLGE_RING_TX_HEAD_REG,
136                                          HCLGE_RING_TX_FBD_NUM_REG,
137                                          HCLGE_RING_TX_OFFSET_REG,
138                                          HCLGE_RING_TX_EBD_NUM_REG,
139                                          HCLGE_RING_TX_EBD_OFFSET_REG,
140                                          HCLGE_RING_TX_BD_ERR_REG,
141                                          HCLGE_RING_EN_REG};
142
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144                                              HCLGE_TQP_INTR_GL0_REG,
145                                              HCLGE_TQP_INTR_GL1_REG,
146                                              HCLGE_TQP_INTR_GL2_REG,
147                                              HCLGE_TQP_INTR_RL_REG};
148
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150         "App    Loopback test",
151         "Serdes serial Loopback test",
152         "Serdes parallel Loopback test",
153         "Phy    Loopback test"
154 };
155
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157         {"mac_tx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159         {"mac_rx_mac_pause_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161         {"mac_tx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163         {"mac_rx_control_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165         {"mac_tx_pfc_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167         {"mac_tx_pfc_pri0_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169         {"mac_tx_pfc_pri1_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171         {"mac_tx_pfc_pri2_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173         {"mac_tx_pfc_pri3_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175         {"mac_tx_pfc_pri4_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177         {"mac_tx_pfc_pri5_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179         {"mac_tx_pfc_pri6_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181         {"mac_tx_pfc_pri7_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183         {"mac_rx_pfc_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185         {"mac_rx_pfc_pri0_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187         {"mac_rx_pfc_pri1_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189         {"mac_rx_pfc_pri2_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191         {"mac_rx_pfc_pri3_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193         {"mac_rx_pfc_pri4_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195         {"mac_rx_pfc_pri5_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197         {"mac_rx_pfc_pri6_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199         {"mac_rx_pfc_pri7_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201         {"mac_tx_total_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203         {"mac_tx_total_oct_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205         {"mac_tx_good_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207         {"mac_tx_bad_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209         {"mac_tx_good_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211         {"mac_tx_bad_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213         {"mac_tx_uni_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215         {"mac_tx_multi_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217         {"mac_tx_broad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219         {"mac_tx_undersize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221         {"mac_tx_oversize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223         {"mac_tx_64_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225         {"mac_tx_65_127_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227         {"mac_tx_128_255_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229         {"mac_tx_256_511_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231         {"mac_tx_512_1023_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233         {"mac_tx_1024_1518_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235         {"mac_tx_1519_2047_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237         {"mac_tx_2048_4095_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239         {"mac_tx_4096_8191_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241         {"mac_tx_8192_9216_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243         {"mac_tx_9217_12287_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245         {"mac_tx_12288_16383_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247         {"mac_tx_1519_max_good_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249         {"mac_tx_1519_max_bad_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251         {"mac_rx_total_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253         {"mac_rx_total_oct_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255         {"mac_rx_good_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257         {"mac_rx_bad_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259         {"mac_rx_good_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261         {"mac_rx_bad_oct_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263         {"mac_rx_uni_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265         {"mac_rx_multi_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267         {"mac_rx_broad_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269         {"mac_rx_undersize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271         {"mac_rx_oversize_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273         {"mac_rx_64_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275         {"mac_rx_65_127_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277         {"mac_rx_128_255_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279         {"mac_rx_256_511_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281         {"mac_rx_512_1023_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283         {"mac_rx_1024_1518_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285         {"mac_rx_1519_2047_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287         {"mac_rx_2048_4095_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289         {"mac_rx_4096_8191_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291         {"mac_rx_8192_9216_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293         {"mac_rx_9217_12287_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295         {"mac_rx_12288_16383_oct_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297         {"mac_rx_1519_max_good_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299         {"mac_rx_1519_max_bad_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301
302         {"mac_tx_fragment_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304         {"mac_tx_undermin_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306         {"mac_tx_jabber_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308         {"mac_tx_err_all_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310         {"mac_tx_from_app_good_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312         {"mac_tx_from_app_bad_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314         {"mac_rx_fragment_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316         {"mac_rx_undermin_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318         {"mac_rx_jabber_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320         {"mac_rx_fcs_err_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322         {"mac_rx_send_app_good_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324         {"mac_rx_send_app_bad_pkt_num",
325                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329         {
330                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
332                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333                 .i_port_bitmap = 0x1,
334         },
335 };
336
337 static const u8 hclge_hash_key[] = {
338         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344
345 static const u32 hclge_dfx_bd_offset_list[] = {
346         HCLGE_DFX_BIOS_BD_OFFSET,
347         HCLGE_DFX_SSU_0_BD_OFFSET,
348         HCLGE_DFX_SSU_1_BD_OFFSET,
349         HCLGE_DFX_IGU_BD_OFFSET,
350         HCLGE_DFX_RPU_0_BD_OFFSET,
351         HCLGE_DFX_RPU_1_BD_OFFSET,
352         HCLGE_DFX_NCSI_BD_OFFSET,
353         HCLGE_DFX_RTC_BD_OFFSET,
354         HCLGE_DFX_PPP_BD_OFFSET,
355         HCLGE_DFX_RCB_BD_OFFSET,
356         HCLGE_DFX_TQP_BD_OFFSET,
357         HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361         HCLGE_OPC_DFX_BIOS_COMMON_REG,
362         HCLGE_OPC_DFX_SSU_REG_0,
363         HCLGE_OPC_DFX_SSU_REG_1,
364         HCLGE_OPC_DFX_IGU_EGU_REG,
365         HCLGE_OPC_DFX_RPU_REG_0,
366         HCLGE_OPC_DFX_RPU_REG_1,
367         HCLGE_OPC_DFX_NCSI_REG,
368         HCLGE_OPC_DFX_RTC_REG,
369         HCLGE_OPC_DFX_PPP_REG,
370         HCLGE_OPC_DFX_RCB_REG,
371         HCLGE_OPC_DFX_TQP_REG,
372         HCLGE_OPC_DFX_SSU_REG_2
373 };
374
375 static const struct key_info meta_data_key_info[] = {
376         { PACKET_TYPE_ID, 6},
377         { IP_FRAGEMENT, 1},
378         { ROCE_TYPE, 1},
379         { NEXT_KEY, 5},
380         { VLAN_NUMBER, 2},
381         { SRC_VPORT, 12},
382         { DST_VPORT, 12},
383         { TUNNEL_PACKET, 1},
384 };
385
386 static const struct key_info tuple_key_info[] = {
387         { OUTER_DST_MAC, 48},
388         { OUTER_SRC_MAC, 48},
389         { OUTER_VLAN_TAG_FST, 16},
390         { OUTER_VLAN_TAG_SEC, 16},
391         { OUTER_ETH_TYPE, 16},
392         { OUTER_L2_RSV, 16},
393         { OUTER_IP_TOS, 8},
394         { OUTER_IP_PROTO, 8},
395         { OUTER_SRC_IP, 32},
396         { OUTER_DST_IP, 32},
397         { OUTER_L3_RSV, 16},
398         { OUTER_SRC_PORT, 16},
399         { OUTER_DST_PORT, 16},
400         { OUTER_L4_RSV, 32},
401         { OUTER_TUN_VNI, 24},
402         { OUTER_TUN_FLOW_ID, 8},
403         { INNER_DST_MAC, 48},
404         { INNER_SRC_MAC, 48},
405         { INNER_VLAN_TAG_FST, 16},
406         { INNER_VLAN_TAG_SEC, 16},
407         { INNER_ETH_TYPE, 16},
408         { INNER_L2_RSV, 16},
409         { INNER_IP_TOS, 8},
410         { INNER_IP_PROTO, 8},
411         { INNER_SRC_IP, 32},
412         { INNER_DST_IP, 32},
413         { INNER_L3_RSV, 16},
414         { INNER_SRC_PORT, 16},
415         { INNER_DST_PORT, 16},
416         { INNER_L4_RSV, 32},
417 };
418
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422
423         u64 *data = (u64 *)(&hdev->mac_stats);
424         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425         __le64 *desc_data;
426         int i, k, n;
427         int ret;
428
429         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431         if (ret) {
432                 dev_err(&hdev->pdev->dev,
433                         "Get MAC pkt stats fail, status = %d.\n", ret);
434
435                 return ret;
436         }
437
438         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439                 /* for special opcode 0032, only the first desc has the head */
440                 if (unlikely(i == 0)) {
441                         desc_data = (__le64 *)(&desc[i].data[0]);
442                         n = HCLGE_RD_FIRST_STATS_NUM;
443                 } else {
444                         desc_data = (__le64 *)(&desc[i]);
445                         n = HCLGE_RD_OTHER_STATS_NUM;
446                 }
447
448                 for (k = 0; k < n; k++) {
449                         *data += le64_to_cpu(*desc_data);
450                         data++;
451                         desc_data++;
452                 }
453         }
454
455         return 0;
456 }
457
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460         u64 *data = (u64 *)(&hdev->mac_stats);
461         struct hclge_desc *desc;
462         __le64 *desc_data;
463         u16 i, k, n;
464         int ret;
465
466         /* This may be called inside atomic sections,
467          * so GFP_ATOMIC is more suitable here
468          */
469         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470         if (!desc)
471                 return -ENOMEM;
472
473         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475         if (ret) {
476                 kfree(desc);
477                 return ret;
478         }
479
480         for (i = 0; i < desc_num; i++) {
481                 /* for special opcode 0034, only the first desc has the head */
482                 if (i == 0) {
483                         desc_data = (__le64 *)(&desc[i].data[0]);
484                         n = HCLGE_RD_FIRST_STATS_NUM;
485                 } else {
486                         desc_data = (__le64 *)(&desc[i]);
487                         n = HCLGE_RD_OTHER_STATS_NUM;
488                 }
489
490                 for (k = 0; k < n; k++) {
491                         *data += le64_to_cpu(*desc_data);
492                         data++;
493                         desc_data++;
494                 }
495         }
496
497         kfree(desc);
498
499         return 0;
500 }
501
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504         struct hclge_desc desc;
505         __le32 *desc_data;
506         u32 reg_num;
507         int ret;
508
509         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511         if (ret)
512                 return ret;
513
514         desc_data = (__le32 *)(&desc.data[0]);
515         reg_num = le32_to_cpu(*desc_data);
516
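            /* The register count is converted to a descriptor count:
             * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4), i.e. one
             * descriptor plus one more for every four registers beyond
             * the first three.
             */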
517         *desc_num = 1 + ((reg_num - 3) >> 2) +
518                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519
520         return 0;
521 }
522
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525         u32 desc_num;
526         int ret;
527
528         ret = hclge_mac_query_reg_num(hdev, &desc_num);
529
530         /* The firmware supports the new statistics acquisition method */
531         if (!ret)
532                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
533         else if (ret == -EOPNOTSUPP)
534                 ret = hclge_mac_update_stats_defective(hdev);
535         else
536                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537
538         return ret;
539 }
540
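    /* Read the per-queue RX and TX packet counters from the firmware and
     * accumulate them into each tqp's software statistics.
     */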
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544         struct hclge_vport *vport = hclge_get_vport(handle);
545         struct hclge_dev *hdev = vport->back;
546         struct hnae3_queue *queue;
547         struct hclge_desc desc[1];
548         struct hclge_tqp *tqp;
549         int ret, i;
550
551         for (i = 0; i < kinfo->num_tqps; i++) {
552                 queue = handle->kinfo.tqp[i];
553                 tqp = container_of(queue, struct hclge_tqp, q);
554                 /* command : HCLGE_OPC_QUERY_RX_STATS */
555                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556                                            true);
557
558                 desc[0].data[0] = cpu_to_le32(tqp->index);
559                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
560                 if (ret) {
561                         dev_err(&hdev->pdev->dev,
562                                 "Query tqp stat fail, status = %d,queue = %d\n",
563                                 ret, i);
564                         return ret;
565                 }
566                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567                         le32_to_cpu(desc[0].data[1]);
568         }
569
570         for (i = 0; i < kinfo->num_tqps; i++) {
571                 queue = handle->kinfo.tqp[i];
572                 tqp = container_of(queue, struct hclge_tqp, q);
573                 /* command : HCLGE_OPC_QUERY_TX_STATS */
574                 hclge_cmd_setup_basic_desc(&desc[0],
575                                            HCLGE_OPC_QUERY_TX_STATS,
576                                            true);
577
578                 desc[0].data[0] = cpu_to_le32(tqp->index);
579                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
580                 if (ret) {
581                         dev_err(&hdev->pdev->dev,
582                                 "Query tqp stat fail, status = %d,queue = %d\n",
583                                 ret, i);
584                         return ret;
585                 }
586                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587                         le32_to_cpu(desc[0].data[1]);
588         }
589
590         return 0;
591 }
592
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596         struct hclge_tqp *tqp;
597         u64 *buff = data;
598         int i;
599
600         for (i = 0; i < kinfo->num_tqps; i++) {
601                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603         }
604
605         for (i = 0; i < kinfo->num_tqps; i++) {
606                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608         }
609
610         return buff;
611 }
612
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616
617         /* each tqp has both a TX and an RX queue */
618         return kinfo->num_tqps * (2);
619 }
620
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624         u8 *buff = data;
625         int i;
626
627         for (i = 0; i < kinfo->num_tqps; i++) {
628                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629                         struct hclge_tqp, q);
630                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
631                          tqp->index);
632                 buff = buff + ETH_GSTRING_LEN;
633         }
634
635         for (i = 0; i < kinfo->num_tqps; i++) {
636                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637                         struct hclge_tqp, q);
638                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
639                          tqp->index);
640                 buff = buff + ETH_GSTRING_LEN;
641         }
642
643         return buff;
644 }
645
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647                                  const struct hclge_comm_stats_str strs[],
648                                  int size, u64 *data)
649 {
650         u64 *buf = data;
651         u32 i;
652
653         for (i = 0; i < size; i++)
654                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655
656         return buf + size;
657 }
658
659 static u8 *hclge_comm_get_strings(u32 stringset,
660                                   const struct hclge_comm_stats_str strs[],
661                                   int size, u8 *data)
662 {
663         char *buff = (char *)data;
664         u32 i;
665
666         if (stringset != ETH_SS_STATS)
667                 return buff;
668
669         for (i = 0; i < size; i++) {
670                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671                 buff = buff + ETH_GSTRING_LEN;
672         }
673
674         return (u8 *)buff;
675 }
676
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679         struct hnae3_handle *handle;
680         int status;
681
682         handle = &hdev->vport[0].nic;
683         if (handle->client) {
684                 status = hclge_tqps_update_stats(handle);
685                 if (status) {
686                         dev_err(&hdev->pdev->dev,
687                                 "Update TQPS stats fail, status = %d.\n",
688                                 status);
689                 }
690         }
691
692         status = hclge_mac_update_stats(hdev);
693         if (status)
694                 dev_err(&hdev->pdev->dev,
695                         "Update MAC stats fail, status = %d.\n", status);
696 }
697
698 static void hclge_update_stats(struct hnae3_handle *handle,
699                                struct net_device_stats *net_stats)
700 {
701         struct hclge_vport *vport = hclge_get_vport(handle);
702         struct hclge_dev *hdev = vport->back;
703         int status;
704
705         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706                 return;
707
708         status = hclge_mac_update_stats(hdev);
709         if (status)
710                 dev_err(&hdev->pdev->dev,
711                         "Update MAC stats fail, status = %d.\n",
712                         status);
713
714         status = hclge_tqps_update_stats(handle);
715         if (status)
716                 dev_err(&hdev->pdev->dev,
717                         "Update TQPS stats fail, status = %d.\n",
718                         status);
719
720         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726                 HNAE3_SUPPORT_PHY_LOOPBACK |\
727                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729
730         struct hclge_vport *vport = hclge_get_vport(handle);
731         struct hclge_dev *hdev = vport->back;
732         int count = 0;
733
734         /* Loopback test support rules:
735          * mac: only GE mode is supported
736          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
737          * phy: only supported when a phy device exists on the board
738          */
739         if (stringset == ETH_SS_TEST) {
740                 /* clear loopback bit flags at first */
741                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746                         count += 1;
747                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748                 }
749
750                 count += 2;
751                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753
754                 if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
755                     hdev->hw.mac.phydev->drv->set_loopback) {
756                         count += 1;
757                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758                 }
759
760         } else if (stringset == ETH_SS_STATS) {
761                 count = ARRAY_SIZE(g_mac_stats_string) +
762                         hclge_tqps_get_sset_count(handle, stringset);
763         }
764
765         return count;
766 }
767
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
769                               u8 *data)
770 {
771         u8 *p = (char *)data;
772         int size;
773
774         if (stringset == ETH_SS_STATS) {
775                 size = ARRAY_SIZE(g_mac_stats_string);
776                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777                                            size, p);
778                 p = hclge_tqps_get_strings(handle, p);
779         } else if (stringset == ETH_SS_TEST) {
780                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
782                                ETH_GSTRING_LEN);
783                         p += ETH_GSTRING_LEN;
784                 }
785                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
787                                ETH_GSTRING_LEN);
788                         p += ETH_GSTRING_LEN;
789                 }
790                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791                         memcpy(p,
792                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
793                                ETH_GSTRING_LEN);
794                         p += ETH_GSTRING_LEN;
795                 }
796                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
798                                ETH_GSTRING_LEN);
799                         p += ETH_GSTRING_LEN;
800                 }
801         }
802 }
803
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805 {
806         struct hclge_vport *vport = hclge_get_vport(handle);
807         struct hclge_dev *hdev = vport->back;
808         u64 *p;
809
810         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811                                  ARRAY_SIZE(g_mac_stats_string), data);
812         p = hclge_tqps_get_stats(handle, p);
813 }
814
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816                                struct hns3_mac_stats *mac_stats)
817 {
818         struct hclge_vport *vport = hclge_get_vport(handle);
819         struct hclge_dev *hdev = vport->back;
820
821         hclge_update_stats(handle, NULL);
822
823         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
825 }
826
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828                                    struct hclge_func_status_cmd *status)
829 {
830 #define HCLGE_MAC_ID_MASK       0xF
831
832         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833                 return -EINVAL;
834
835         /* Set the pf to main pf */
836         if (status->pf_state & HCLGE_PF_STATE_MAIN)
837                 hdev->flag |= HCLGE_FLAG_MAIN;
838         else
839                 hdev->flag &= ~HCLGE_FLAG_MAIN;
840
841         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
842         return 0;
843 }
844
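    /* Poll the firmware for the function status until pf_state is reported
     * (i.e. the PF reset has completed) or HCLGE_QUERY_MAX_CNT attempts have
     * been made, then parse the result into hdev.
     */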
845 static int hclge_query_function_status(struct hclge_dev *hdev)
846 {
847 #define HCLGE_QUERY_MAX_CNT     5
848
849         struct hclge_func_status_cmd *req;
850         struct hclge_desc desc;
851         int timeout = 0;
852         int ret;
853
854         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855         req = (struct hclge_func_status_cmd *)desc.data;
856
857         do {
858                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859                 if (ret) {
860                         dev_err(&hdev->pdev->dev,
861                                 "query function status failed %d.\n", ret);
862                         return ret;
863                 }
864
865                 /* Check pf reset is done */
866                 if (req->pf_state)
867                         break;
868                 usleep_range(1000, 2000);
869         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
870
871         return hclge_parse_func_status(hdev, req);
872 }
873
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
875 {
876         struct hclge_pf_res_cmd *req;
877         struct hclge_desc desc;
878         int ret;
879
880         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882         if (ret) {
883                 dev_err(&hdev->pdev->dev,
884                         "query pf resource failed %d.\n", ret);
885                 return ret;
886         }
887
888         req = (struct hclge_pf_res_cmd *)desc.data;
889         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
890                          le16_to_cpu(req->ext_tqp_num);
891         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892
893         if (req->tx_buf_size)
894                 hdev->tx_buf_size =
895                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896         else
897                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898
899         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900
901         if (req->dv_buf_size)
902                 hdev->dv_buf_size =
903                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904         else
905                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906
907         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908
909         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
910         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
911                 dev_err(&hdev->pdev->dev,
912                         "only %u msi resources available, not enough for pf(min:2).\n",
913                         hdev->num_nic_msi);
914                 return -EINVAL;
915         }
916
917         if (hnae3_dev_roce_supported(hdev)) {
918                 hdev->num_roce_msi =
919                         le16_to_cpu(req->pf_intr_vector_number_roce);
920
921                 /* PF should have both NIC vectors and RoCE vectors;
922                  * NIC vectors are queued before RoCE vectors.
923                  */
924                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
925         } else {
926                 hdev->num_msi = hdev->num_nic_msi;
927         }
928
929         return 0;
930 }
931
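    /* Translate the speed code reported by the firmware into the
     * corresponding HCLGE_MAC_SPEED_* value; unknown codes return -EINVAL.
     */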
932 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
933 {
934         switch (speed_cmd) {
935         case 6:
936                 *speed = HCLGE_MAC_SPEED_10M;
937                 break;
938         case 7:
939                 *speed = HCLGE_MAC_SPEED_100M;
940                 break;
941         case 0:
942                 *speed = HCLGE_MAC_SPEED_1G;
943                 break;
944         case 1:
945                 *speed = HCLGE_MAC_SPEED_10G;
946                 break;
947         case 2:
948                 *speed = HCLGE_MAC_SPEED_25G;
949                 break;
950         case 3:
951                 *speed = HCLGE_MAC_SPEED_40G;
952                 break;
953         case 4:
954                 *speed = HCLGE_MAC_SPEED_50G;
955                 break;
956         case 5:
957                 *speed = HCLGE_MAC_SPEED_100G;
958                 break;
959         case 8:
960                 *speed = HCLGE_MAC_SPEED_200G;
961                 break;
962         default:
963                 return -EINVAL;
964         }
965
966         return 0;
967 }
968
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971         struct hclge_vport *vport = hclge_get_vport(handle);
972         struct hclge_dev *hdev = vport->back;
973         u32 speed_ability = hdev->hw.mac.speed_ability;
974         u32 speed_bit = 0;
975
976         switch (speed) {
977         case HCLGE_MAC_SPEED_10M:
978                 speed_bit = HCLGE_SUPPORT_10M_BIT;
979                 break;
980         case HCLGE_MAC_SPEED_100M:
981                 speed_bit = HCLGE_SUPPORT_100M_BIT;
982                 break;
983         case HCLGE_MAC_SPEED_1G:
984                 speed_bit = HCLGE_SUPPORT_1G_BIT;
985                 break;
986         case HCLGE_MAC_SPEED_10G:
987                 speed_bit = HCLGE_SUPPORT_10G_BIT;
988                 break;
989         case HCLGE_MAC_SPEED_25G:
990                 speed_bit = HCLGE_SUPPORT_25G_BIT;
991                 break;
992         case HCLGE_MAC_SPEED_40G:
993                 speed_bit = HCLGE_SUPPORT_40G_BIT;
994                 break;
995         case HCLGE_MAC_SPEED_50G:
996                 speed_bit = HCLGE_SUPPORT_50G_BIT;
997                 break;
998         case HCLGE_MAC_SPEED_100G:
999                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1000                 break;
1001         case HCLGE_MAC_SPEED_200G:
1002                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1003                 break;
1004         default:
1005                 return -EINVAL;
1006         }
1007
1008         if (speed_bit & speed_ability)
1009                 return 0;
1010
1011         return -EINVAL;
1012 }
1013
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1015 {
1016         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018                                  mac->supported);
1019         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021                                  mac->supported);
1022         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024                                  mac->supported);
1025         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027                                  mac->supported);
1028         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030                                  mac->supported);
1031         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1032                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1033                                  mac->supported);
1034 }
1035
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1037 {
1038         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043                                  mac->supported);
1044         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046                                  mac->supported);
1047         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049                                  mac->supported);
1050         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052                                  mac->supported);
1053         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1054                 linkmode_set_bit(
1055                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1056                         mac->supported);
1057 }
1058
1059 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1060 {
1061         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1063                                  mac->supported);
1064         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1066                                  mac->supported);
1067         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1068                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1069                                  mac->supported);
1070         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1072                                  mac->supported);
1073         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1078                                  mac->supported);
1079 }
1080
1081 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1082 {
1083         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1084                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1085                                  mac->supported);
1086         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1087                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1088                                  mac->supported);
1089         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1090                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1091                                  mac->supported);
1092         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1093                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1094                                  mac->supported);
1095         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1096                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1097                                  mac->supported);
1098         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1099                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1100                                  mac->supported);
1101         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1102                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1103                                  mac->supported);
1104 }
1105
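     /* Refresh the advertised FEC link modes and fec_ability to match the
      * current MAC speed: BaseR is advertised for 10G/40G, RS for 25G and
      * above, and any other speed clears fec_ability.
      */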
1106 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1107 {
1108         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1109         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1110
1111         switch (mac->speed) {
1112         case HCLGE_MAC_SPEED_10G:
1113         case HCLGE_MAC_SPEED_40G:
1114                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1115                                  mac->supported);
1116                 mac->fec_ability =
1117                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1118                 break;
1119         case HCLGE_MAC_SPEED_25G:
1120         case HCLGE_MAC_SPEED_50G:
1121                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1122                                  mac->supported);
1123                 mac->fec_ability =
1124                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1125                         BIT(HNAE3_FEC_AUTO);
1126                 break;
1127         case HCLGE_MAC_SPEED_100G:
1128         case HCLGE_MAC_SPEED_200G:
1129                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1130                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1131                 break;
1132         default:
1133                 mac->fec_ability = 0;
1134                 break;
1135         }
1136 }
1137
1138 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1139                                         u16 speed_ability)
1140 {
1141         struct hclge_mac *mac = &hdev->hw.mac;
1142
1143         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1144                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1145                                  mac->supported);
1146
1147         hclge_convert_setting_sr(mac, speed_ability);
1148         hclge_convert_setting_lr(mac, speed_ability);
1149         hclge_convert_setting_cr(mac, speed_ability);
1150         if (hnae3_dev_fec_supported(hdev))
1151                 hclge_convert_setting_fec(mac);
1152
1153         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1154         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1156 }
1157
1158 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1159                                             u16 speed_ability)
1160 {
1161         struct hclge_mac *mac = &hdev->hw.mac;
1162
1163         hclge_convert_setting_kr(mac, speed_ability);
1164         if (hnae3_dev_fec_supported(hdev))
1165                 hclge_convert_setting_fec(mac);
1166         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1167         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1168         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1169 }
1170
1171 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1172                                          u16 speed_ability)
1173 {
1174         unsigned long *supported = hdev->hw.mac.supported;
1175
1176         /* default to supporting all speeds for a GE port */
1177         if (!speed_ability)
1178                 speed_ability = HCLGE_SUPPORT_GE;
1179
1180         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1181                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1182                                  supported);
1183
1184         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1185                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1186                                  supported);
1187                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1188                                  supported);
1189         }
1190
1191         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1192                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1193                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1194         }
1195
1196         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1197         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1198         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1199         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1200 }
1201
1202 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1203 {
1204         u8 media_type = hdev->hw.mac.media_type;
1205
1206         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1207                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1208         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1209                 hclge_parse_copper_link_mode(hdev, speed_ability);
1210         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1211                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1212 }
1213
1214 static u32 hclge_get_max_speed(u16 speed_ability)
1215 {
1216         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1217                 return HCLGE_MAC_SPEED_200G;
1218
1219         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1220                 return HCLGE_MAC_SPEED_100G;
1221
1222         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1223                 return HCLGE_MAC_SPEED_50G;
1224
1225         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1226                 return HCLGE_MAC_SPEED_40G;
1227
1228         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1229                 return HCLGE_MAC_SPEED_25G;
1230
1231         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1232                 return HCLGE_MAC_SPEED_10G;
1233
1234         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1235                 return HCLGE_MAC_SPEED_1G;
1236
1237         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1238                 return HCLGE_MAC_SPEED_100M;
1239
1240         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1241                 return HCLGE_MAC_SPEED_10M;
1242
1243         return HCLGE_MAC_SPEED_1G;
1244 }
1245
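     /* Unpack the HCLGE_OPC_GET_CFG_PARAM response descriptors into the
      * driver's struct hclge_cfg.
      */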
1246 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1247 {
1248 #define SPEED_ABILITY_EXT_SHIFT                 8
1249
1250         struct hclge_cfg_param_cmd *req;
1251         u64 mac_addr_tmp_high;
1252         u16 speed_ability_ext;
1253         u64 mac_addr_tmp;
1254         unsigned int i;
1255
1256         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1257
1258         /* get the configuration */
1259         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1260                                               HCLGE_CFG_VMDQ_M,
1261                                               HCLGE_CFG_VMDQ_S);
1262         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1263                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1264         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1265                                             HCLGE_CFG_TQP_DESC_N_M,
1266                                             HCLGE_CFG_TQP_DESC_N_S);
1267
1268         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1269                                         HCLGE_CFG_PHY_ADDR_M,
1270                                         HCLGE_CFG_PHY_ADDR_S);
1271         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1272                                           HCLGE_CFG_MEDIA_TP_M,
1273                                           HCLGE_CFG_MEDIA_TP_S);
1274         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1275                                           HCLGE_CFG_RX_BUF_LEN_M,
1276                                           HCLGE_CFG_RX_BUF_LEN_S);
1277         /* get mac_address */
1278         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1279         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1280                                             HCLGE_CFG_MAC_ADDR_H_M,
1281                                             HCLGE_CFG_MAC_ADDR_H_S);
1282
1283         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1284
1285         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1286                                              HCLGE_CFG_DEFAULT_SPEED_M,
1287                                              HCLGE_CFG_DEFAULT_SPEED_S);
1288         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1289                                                HCLGE_CFG_RSS_SIZE_M,
1290                                                HCLGE_CFG_RSS_SIZE_S);
1291
1292         for (i = 0; i < ETH_ALEN; i++)
1293                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1294
1295         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1296         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1297
1298         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1299                                              HCLGE_CFG_SPEED_ABILITY_M,
1300                                              HCLGE_CFG_SPEED_ABILITY_S);
1301         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1302                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1303                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1304         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1305
1306         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1308                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1309         if (!cfg->umv_space)
1310                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1311
1312         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1313                                                HCLGE_CFG_PF_RSS_SIZE_M,
1314                                                HCLGE_CFG_PF_RSS_SIZE_S);
1315
1316         /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power
1317          * of 2, instead of the value itself, which is more flexible for
1318          * future changes and expansions.
1319          * When the PF's field is 0, the PF falls back to the VF max rss
1320          * size, so PF and VF then share the same max rss size field:
1321          * HCLGE_CFG_RSS_SIZE_S.
1322          */
1323         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1324                                1U << cfg->pf_rss_size_max :
1325                                cfg->vf_rss_size_max;
1326 }
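
/* Illustrative sketch, not part of the driver: how the 48-bit MAC address
 * parsed above could be rebuilt from the two config words. It assumes the
 * low word carries bits 0..31 and the high field carries bits 32..47,
 * matching the shifts in hclge_parse_cfg(). The helper name is hypothetical.
 */
static inline u64 hclge_example_rebuild_mac(u32 addr_low, u16 addr_high)
{
        return (u64)addr_low | ((u64)addr_high << 32);
}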
1327
1328 /* hclge_get_cfg: query the static parameters from flash
1329  * @hdev: pointer to struct hclge_dev
1330  * @hcfg: the config structure to be filled
1331  */
1332 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1333 {
1334         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1335         struct hclge_cfg_param_cmd *req;
1336         unsigned int i;
1337         int ret;
1338
1339         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1340                 u32 offset = 0;
1341
1342                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1343                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1344                                            true);
1345                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1346                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1347                 /* Length must be in units of 4 bytes when sent to hardware */
1348                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1349                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1350                 req->offset = cpu_to_le32(offset);
1351         }
1352
1353         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1354         if (ret) {
1355                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1356                 return ret;
1357         }
1358
1359         hclge_parse_cfg(hcfg, desc);
1360
1361         return 0;
1362 }
1363
1364 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1365 {
1366 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1367
1368         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1369
1370         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1371         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1372         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1373         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1374         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1375         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1376         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1377 }
1378
1379 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1380                                   struct hclge_desc *desc)
1381 {
1382         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1383         struct hclge_dev_specs_0_cmd *req0;
1384         struct hclge_dev_specs_1_cmd *req1;
1385
1386         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1387         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1388
1389         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1390         ae_dev->dev_specs.rss_ind_tbl_size =
1391                 le16_to_cpu(req0->rss_ind_tbl_size);
1392         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1393         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1394         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1395         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1396         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1397         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1398 }
1399
1400 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1401 {
1402         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1403
1404         if (!dev_specs->max_non_tso_bd_num)
1405                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406         if (!dev_specs->rss_ind_tbl_size)
1407                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1408         if (!dev_specs->rss_key_size)
1409                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1410         if (!dev_specs->max_tm_rate)
1411                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1412         if (!dev_specs->max_qset_num)
1413                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1414         if (!dev_specs->max_int_gl)
1415                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1416         if (!dev_specs->max_frm_size)
1417                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1418 }
1419
1420 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1421 {
1422         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1423         int ret;
1424         int i;
1425
1426         /* set default specifications as devices lower than version V3 do not
1427          * support querying specifications from firmware.
1428          */
1429         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1430                 hclge_set_default_dev_specs(hdev);
1431                 return 0;
1432         }
1433
1434         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1435                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1436                                            true);
1437                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1438         }
1439         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1440
1441         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1442         if (ret)
1443                 return ret;
1444
1445         hclge_parse_dev_specs(hdev, desc);
1446         hclge_check_dev_specs(hdev);
1447
1448         return 0;
1449 }
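
/* Illustrative sketch, an assumption rather than existing driver code: the
 * multi-BD pattern used by hclge_query_dev_specs() above, written as a
 * generic helper. Every descriptor except the last carries
 * HCLGE_CMD_FLAG_NEXT so the firmware treats the buffers as one command.
 * The helper name is hypothetical.
 */
static int hclge_example_send_chained(struct hclge_dev *hdev,
                                      struct hclge_desc *desc, int num,
                                      enum hclge_opcode_type opcode)
{
        int i;

        for (i = 0; i < num - 1; i++) {
                hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
                desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        }
        hclge_cmd_setup_basic_desc(&desc[num - 1], opcode, true);

        return hclge_cmd_send(&hdev->hw, desc, num);
}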
1450
1451 static int hclge_get_cap(struct hclge_dev *hdev)
1452 {
1453         int ret;
1454
1455         ret = hclge_query_function_status(hdev);
1456         if (ret) {
1457                 dev_err(&hdev->pdev->dev,
1458                         "query function status error %d.\n", ret);
1459                 return ret;
1460         }
1461
1462         /* get pf resource */
1463         return hclge_query_pf_resource(hdev);
1464 }
1465
1466 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1467 {
1468 #define HCLGE_MIN_TX_DESC       64
1469 #define HCLGE_MIN_RX_DESC       64
1470
1471         if (!is_kdump_kernel())
1472                 return;
1473
1474         dev_info(&hdev->pdev->dev,
1475                  "Running kdump kernel. Using minimal resources\n");
1476
1477         /* the minimum number of queue pairs equals the number of vports */
1478         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1479         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1480         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1481 }
1482
1483 static int hclge_configure(struct hclge_dev *hdev)
1484 {
1485         struct hclge_cfg cfg;
1486         unsigned int i;
1487         int ret;
1488
1489         ret = hclge_get_cfg(hdev, &cfg);
1490         if (ret)
1491                 return ret;
1492
1493         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1494         hdev->base_tqp_pid = 0;
1495         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1496         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1497         hdev->rx_buf_len = cfg.rx_buf_len;
1498         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1499         hdev->hw.mac.media_type = cfg.media_type;
1500         hdev->hw.mac.phy_addr = cfg.phy_addr;
1501         hdev->num_tx_desc = cfg.tqp_desc_num;
1502         hdev->num_rx_desc = cfg.tqp_desc_num;
1503         hdev->tm_info.num_pg = 1;
1504         hdev->tc_max = cfg.tc_num;
1505         hdev->tm_info.hw_pfc_map = 0;
1506         hdev->wanted_umv_size = cfg.umv_space;
1507
1508         if (hnae3_dev_fd_supported(hdev)) {
1509                 hdev->fd_en = true;
1510                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1511         }
1512
1513         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1514         if (ret) {
1515                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1516                         cfg.default_speed, ret);
1517                 return ret;
1518         }
1519
1520         hclge_parse_link_mode(hdev, cfg.speed_ability);
1521
1522         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1523
1524         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1525             (hdev->tc_max < 1)) {
1526                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1527                          hdev->tc_max);
1528                 hdev->tc_max = 1;
1529         }
1530
1531         /* Dev does not support DCB */
1532         if (!hnae3_dev_dcb_supported(hdev)) {
1533                 hdev->tc_max = 1;
1534                 hdev->pfc_max = 0;
1535         } else {
1536                 hdev->pfc_max = hdev->tc_max;
1537         }
1538
1539         hdev->tm_info.num_tc = 1;
1540
1541         /* Non-contiguous TCs are currently not supported */
1542         for (i = 0; i < hdev->tm_info.num_tc; i++)
1543                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1544
1545         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1546
1547         hclge_init_kdump_kernel_config(hdev);
1548
1549         /* Set the initial affinity based on the PCI function number */
1550         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1551         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1552         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1553                         &hdev->affinity_mask);
1554
1555         return ret;
1556 }
1557
1558 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1559                             u16 tso_mss_max)
1560 {
1561         struct hclge_cfg_tso_status_cmd *req;
1562         struct hclge_desc desc;
1563
1564         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1565
1566         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1567         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1568         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1569
1570         return hclge_cmd_send(&hdev->hw, &desc, 1);
1571 }
1572
1573 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1574 {
1575         struct hclge_cfg_gro_status_cmd *req;
1576         struct hclge_desc desc;
1577         int ret;
1578
1579         if (!hnae3_dev_gro_supported(hdev))
1580                 return 0;
1581
1582         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1583         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1584
1585         req->gro_en = en ? 1 : 0;
1586
1587         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1588         if (ret)
1589                 dev_err(&hdev->pdev->dev,
1590                         "GRO hardware config cmd failed, ret = %d\n", ret);
1591
1592         return ret;
1593 }
1594
1595 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1596 {
1597         struct hclge_tqp *tqp;
1598         int i;
1599
1600         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1601                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1602         if (!hdev->htqp)
1603                 return -ENOMEM;
1604
1605         tqp = hdev->htqp;
1606
1607         for (i = 0; i < hdev->num_tqps; i++) {
1608                 tqp->dev = &hdev->pdev->dev;
1609                 tqp->index = i;
1610
1611                 tqp->q.ae_algo = &ae_algo;
1612                 tqp->q.buf_size = hdev->rx_buf_len;
1613                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1614                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1615
1616                 /* need an extended offset to configure queues >=
1617                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1618                  */
1619                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1620                         tqp->q.io_base = hdev->hw.io_base +
1621                                          HCLGE_TQP_REG_OFFSET +
1622                                          i * HCLGE_TQP_REG_SIZE;
1623                 else
1624                         tqp->q.io_base = hdev->hw.io_base +
1625                                          HCLGE_TQP_REG_OFFSET +
1626                                          HCLGE_TQP_EXT_REG_OFFSET +
1627                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1628                                          HCLGE_TQP_REG_SIZE;
1629
1630                 tqp++;
1631         }
1632
1633         return 0;
1634 }
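
/* Illustrative sketch, not part of the driver: the queue register offset
 * computed in hclge_alloc_tqps() above as a standalone helper. Queues below
 * HCLGE_TQP_MAX_SIZE_DEV_V2 live in the legacy register region, later
 * queues in the extended region. The helper name is hypothetical.
 */
static inline u32 hclge_example_tqp_reg_offset(u16 i)
{
        if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
                return HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE;

        return HCLGE_TQP_REG_OFFSET + HCLGE_TQP_EXT_REG_OFFSET +
               (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * HCLGE_TQP_REG_SIZE;
}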
1635
1636 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1637                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1638 {
1639         struct hclge_tqp_map_cmd *req;
1640         struct hclge_desc desc;
1641         int ret;
1642
1643         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1644
1645         req = (struct hclge_tqp_map_cmd *)desc.data;
1646         req->tqp_id = cpu_to_le16(tqp_pid);
1647         req->tqp_vf = func_id;
1648         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1649         if (!is_pf)
1650                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1651         req->tqp_vid = cpu_to_le16(tqp_vid);
1652
1653         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1654         if (ret)
1655                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1656
1657         return ret;
1658 }
1659
1660 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1661 {
1662         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1663         struct hclge_dev *hdev = vport->back;
1664         int i, alloced;
1665
1666         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1667              alloced < num_tqps; i++) {
1668                 if (!hdev->htqp[i].alloced) {
1669                         hdev->htqp[i].q.handle = &vport->nic;
1670                         hdev->htqp[i].q.tqp_index = alloced;
1671                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1672                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1673                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1674                         hdev->htqp[i].alloced = true;
1675                         alloced++;
1676                 }
1677         }
1678         vport->alloc_tqps = alloced;
1679         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1680                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1681
1682         /* ensure a one-to-one mapping between irq and queue by default */
1683         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1684                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1685
1686         return 0;
1687 }
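
/* Illustrative sketch, not part of the driver: the rss_size bound applied in
 * hclge_assign_tqp() above. The value may not exceed the PF maximum, the
 * queues available per TC, or the interrupt vectors available per TC (one
 * vector is kept back). Assumes num_tc is non-zero, as the driver
 * guarantees. The helper name is hypothetical.
 */
static inline u16 hclge_example_rss_size(u16 pf_rss_size_max, u16 alloc_tqps,
                                         u16 num_nic_msi, u8 num_tc)
{
        u16 rss_size = min_t(u16, pf_rss_size_max, alloc_tqps / num_tc);

        return min_t(u16, rss_size, (num_nic_msi - 1) / num_tc);
}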
1688
1689 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1690                             u16 num_tx_desc, u16 num_rx_desc)
1691
1692 {
1693         struct hnae3_handle *nic = &vport->nic;
1694         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1695         struct hclge_dev *hdev = vport->back;
1696         int ret;
1697
1698         kinfo->num_tx_desc = num_tx_desc;
1699         kinfo->num_rx_desc = num_rx_desc;
1700
1701         kinfo->rx_buf_len = hdev->rx_buf_len;
1702
1703         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1704                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1705         if (!kinfo->tqp)
1706                 return -ENOMEM;
1707
1708         ret = hclge_assign_tqp(vport, num_tqps);
1709         if (ret)
1710                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1711
1712         return ret;
1713 }
1714
1715 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1716                                   struct hclge_vport *vport)
1717 {
1718         struct hnae3_handle *nic = &vport->nic;
1719         struct hnae3_knic_private_info *kinfo;
1720         u16 i;
1721
1722         kinfo = &nic->kinfo;
1723         for (i = 0; i < vport->alloc_tqps; i++) {
1724                 struct hclge_tqp *q =
1725                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1726                 bool is_pf;
1727                 int ret;
1728
1729                 is_pf = !(vport->vport_id);
1730                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1731                                              i, is_pf);
1732                 if (ret)
1733                         return ret;
1734         }
1735
1736         return 0;
1737 }
1738
1739 static int hclge_map_tqp(struct hclge_dev *hdev)
1740 {
1741         struct hclge_vport *vport = hdev->vport;
1742         u16 i, num_vport;
1743
1744         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1745         for (i = 0; i < num_vport; i++) {
1746                 int ret;
1747
1748                 ret = hclge_map_tqp_to_vport(hdev, vport);
1749                 if (ret)
1750                         return ret;
1751
1752                 vport++;
1753         }
1754
1755         return 0;
1756 }
1757
1758 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1759 {
1760         struct hnae3_handle *nic = &vport->nic;
1761         struct hclge_dev *hdev = vport->back;
1762         int ret;
1763
1764         nic->pdev = hdev->pdev;
1765         nic->ae_algo = &ae_algo;
1766         nic->numa_node_mask = hdev->numa_node_mask;
1767
1768         ret = hclge_knic_setup(vport, num_tqps,
1769                                hdev->num_tx_desc, hdev->num_rx_desc);
1770         if (ret)
1771                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1772
1773         return ret;
1774 }
1775
1776 static int hclge_alloc_vport(struct hclge_dev *hdev)
1777 {
1778         struct pci_dev *pdev = hdev->pdev;
1779         struct hclge_vport *vport;
1780         u32 tqp_main_vport;
1781         u32 tqp_per_vport;
1782         int num_vport, i;
1783         int ret;
1784
1785         /* We need to alloc a vport for the PF's main NIC */
1786         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1787
1788         if (hdev->num_tqps < num_vport) {
1789                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1790                         hdev->num_tqps, num_vport);
1791                 return -EINVAL;
1792         }
1793
1794         /* Alloc the same number of TQPs for every vport */
1795         tqp_per_vport = hdev->num_tqps / num_vport;
1796         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1797
1798         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1799                              GFP_KERNEL);
1800         if (!vport)
1801                 return -ENOMEM;
1802
1803         hdev->vport = vport;
1804         hdev->num_alloc_vport = num_vport;
1805
1806         if (IS_ENABLED(CONFIG_PCI_IOV))
1807                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1808
1809         for (i = 0; i < num_vport; i++) {
1810                 vport->back = hdev;
1811                 vport->vport_id = i;
1812                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1813                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1814                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1815                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1816                 INIT_LIST_HEAD(&vport->vlan_list);
1817                 INIT_LIST_HEAD(&vport->uc_mac_list);
1818                 INIT_LIST_HEAD(&vport->mc_mac_list);
1819                 spin_lock_init(&vport->mac_list_lock);
1820
1821                 if (i == 0)
1822                         ret = hclge_vport_setup(vport, tqp_main_vport);
1823                 else
1824                         ret = hclge_vport_setup(vport, tqp_per_vport);
1825                 if (ret) {
1826                         dev_err(&pdev->dev,
1827                                 "vport setup failed for vport %d, %d\n",
1828                                 i, ret);
1829                         return ret;
1830                 }
1831
1832                 vport++;
1833         }
1834
1835         return 0;
1836 }
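
/* Illustrative sketch, not part of the driver: how hclge_alloc_vport()
 * above splits the TQPs. Every vport gets an equal share and the PF's main
 * vport additionally takes the remainder. The helper name is hypothetical.
 */
static inline void hclge_example_split_tqps(u16 num_tqps, int num_vport,
                                            u32 *tqp_per_vport,
                                            u32 *tqp_main_vport)
{
        *tqp_per_vport = num_tqps / num_vport;
        *tqp_main_vport = *tqp_per_vport + num_tqps % num_vport;
}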
1837
1838 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1839                                     struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841 /* TX buffer size is in units of 128 bytes */
1842 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1843 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1844         struct hclge_tx_buff_alloc_cmd *req;
1845         struct hclge_desc desc;
1846         int ret;
1847         u8 i;
1848
1849         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1850
1851         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1852         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1853                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1854
1855                 req->tx_pkt_buff[i] =
1856                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1857                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1858         }
1859
1860         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1861         if (ret)
1862                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1863                         ret);
1864
1865         return ret;
1866 }
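
/* Illustrative sketch, not part of the driver: the per-TC TX buffer word
 * built in hclge_cmd_alloc_tx_buff() above. The size is expressed in
 * 128-byte units and the update-enable bit asks the firmware to apply the
 * new value. The helper name is hypothetical.
 */
static inline __le16 hclge_example_tx_buf_word(u32 buf_size)
{
        return cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
                           HCLGE_BUF_SIZE_UPDATE_EN_MSK);
}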
1867
1868 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1869                                  struct hclge_pkt_buf_alloc *buf_alloc)
1870 {
1871         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1872
1873         if (ret)
1874                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1875
1876         return ret;
1877 }
1878
1879 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1880 {
1881         unsigned int i;
1882         u32 cnt = 0;
1883
1884         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1885                 if (hdev->hw_tc_map & BIT(i))
1886                         cnt++;
1887         return cnt;
1888 }
1889
1890 /* Get the number of PFC-enabled TCs which have a private buffer */
1891 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1892                                   struct hclge_pkt_buf_alloc *buf_alloc)
1893 {
1894         struct hclge_priv_buf *priv;
1895         unsigned int i;
1896         int cnt = 0;
1897
1898         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899                 priv = &buf_alloc->priv_buf[i];
1900                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1901                     priv->enable)
1902                         cnt++;
1903         }
1904
1905         return cnt;
1906 }
1907
1908 /* Get the number of PFC-disabled TCs which have a private buffer */
1909 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1910                                      struct hclge_pkt_buf_alloc *buf_alloc)
1911 {
1912         struct hclge_priv_buf *priv;
1913         unsigned int i;
1914         int cnt = 0;
1915
1916         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1917                 priv = &buf_alloc->priv_buf[i];
1918                 if (hdev->hw_tc_map & BIT(i) &&
1919                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1920                     priv->enable)
1921                         cnt++;
1922         }
1923
1924         return cnt;
1925 }
1926
1927 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1928 {
1929         struct hclge_priv_buf *priv;
1930         u32 rx_priv = 0;
1931         int i;
1932
1933         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1934                 priv = &buf_alloc->priv_buf[i];
1935                 if (priv->enable)
1936                         rx_priv += priv->buf_size;
1937         }
1938         return rx_priv;
1939 }
1940
1941 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1942 {
1943         u32 i, total_tx_size = 0;
1944
1945         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1946                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1947
1948         return total_tx_size;
1949 }
1950
1951 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1952                                 struct hclge_pkt_buf_alloc *buf_alloc,
1953                                 u32 rx_all)
1954 {
1955         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1956         u32 tc_num = hclge_get_tc_num(hdev);
1957         u32 shared_buf, aligned_mps;
1958         u32 rx_priv;
1959         int i;
1960
1961         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1962
1963         if (hnae3_dev_dcb_supported(hdev))
1964                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1965                                         hdev->dv_buf_size;
1966         else
1967                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1968                                         + hdev->dv_buf_size;
1969
1970         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1971         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1972                              HCLGE_BUF_SIZE_UNIT);
1973
1974         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1975         if (rx_all < rx_priv + shared_std)
1976                 return false;
1977
1978         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1979         buf_alloc->s_buf.buf_size = shared_buf;
1980         if (hnae3_dev_dcb_supported(hdev)) {
1981                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1982                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1983                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1984                                   HCLGE_BUF_SIZE_UNIT);
1985         } else {
1986                 buf_alloc->s_buf.self.high = aligned_mps +
1987                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1988                 buf_alloc->s_buf.self.low = aligned_mps;
1989         }
1990
1991         if (hnae3_dev_dcb_supported(hdev)) {
1992                 hi_thrd = shared_buf - hdev->dv_buf_size;
1993
1994                 if (tc_num <= NEED_RESERVE_TC_NUM)
1995                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1996                                         / BUF_MAX_PERCENT;
1997
1998                 if (tc_num)
1999                         hi_thrd = hi_thrd / tc_num;
2000
2001                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2002                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2003                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2004         } else {
2005                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2006                 lo_thrd = aligned_mps;
2007         }
2008
2009         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2010                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2011                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2012         }
2013
2014         return true;
2015 }
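
/* Illustrative sketch, not part of the driver: the minimum shared buffer
 * size checked by hclge_is_rx_buf_ok() above for a DCB-capable device,
 * namely the larger of the MPS-based minimum and the per-TC estimate,
 * rounded up to the buffer allocation unit. The helper name is
 * hypothetical.
 */
static inline u32 hclge_example_shared_std(u32 aligned_mps, u32 dv_buf_size,
                                           u32 tc_num)
{
        u32 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps + dv_buf_size;
        u32 shared_buf_tc = tc_num * aligned_mps + aligned_mps;

        return roundup(max_t(u32, shared_buf_min, shared_buf_tc),
                       HCLGE_BUF_SIZE_UNIT);
}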
2016
2017 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2018                                 struct hclge_pkt_buf_alloc *buf_alloc)
2019 {
2020         u32 i, total_size;
2021
2022         total_size = hdev->pkt_buf_size;
2023
2024         /* alloc tx buffer for all enabled tc */
2025         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2026                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2027
2028                 if (hdev->hw_tc_map & BIT(i)) {
2029                         if (total_size < hdev->tx_buf_size)
2030                                 return -ENOMEM;
2031
2032                         priv->tx_buf_size = hdev->tx_buf_size;
2033                 } else {
2034                         priv->tx_buf_size = 0;
2035                 }
2036
2037                 total_size -= priv->tx_buf_size;
2038         }
2039
2040         return 0;
2041 }
2042
2043 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2044                                   struct hclge_pkt_buf_alloc *buf_alloc)
2045 {
2046         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2047         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2048         unsigned int i;
2049
2050         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2051                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2052
2053                 priv->enable = 0;
2054                 priv->wl.low = 0;
2055                 priv->wl.high = 0;
2056                 priv->buf_size = 0;
2057
2058                 if (!(hdev->hw_tc_map & BIT(i)))
2059                         continue;
2060
2061                 priv->enable = 1;
2062
2063                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2064                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2065                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2066                                                 HCLGE_BUF_SIZE_UNIT);
2067                 } else {
2068                         priv->wl.low = 0;
2069                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2070                                         aligned_mps;
2071                 }
2072
2073                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2074         }
2075
2076         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2077 }
2078
2079 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2080                                           struct hclge_pkt_buf_alloc *buf_alloc)
2081 {
2082         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2083         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2084         int i;
2085
2086         /* clear from the last TC first */
2087         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2088                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2089                 unsigned int mask = BIT((unsigned int)i);
2090
2091                 if (hdev->hw_tc_map & mask &&
2092                     !(hdev->tm_info.hw_pfc_map & mask)) {
2093                         /* Clear the private buffer of a TC without PFC */
2094                         priv->wl.low = 0;
2095                         priv->wl.high = 0;
2096                         priv->buf_size = 0;
2097                         priv->enable = 0;
2098                         no_pfc_priv_num--;
2099                 }
2100
2101                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2102                     no_pfc_priv_num == 0)
2103                         break;
2104         }
2105
2106         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108
2109 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2110                                         struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2114         int i;
2115
2116         /* clear from the last TC first */
2117         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119                 unsigned int mask = BIT((unsigned int)i);
2120
2121                 if (hdev->hw_tc_map & mask &&
2122                     hdev->tm_info.hw_pfc_map & mask) {
2123                         /* Reduce the number of PFC TCs with a private buffer */
2124                         priv->wl.low = 0;
2125                         priv->enable = 0;
2126                         priv->wl.high = 0;
2127                         priv->buf_size = 0;
2128                         pfc_priv_num--;
2129                 }
2130
2131                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132                     pfc_priv_num == 0)
2133                         break;
2134         }
2135
2136         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138
2139 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2140                                        struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142 #define COMPENSATE_BUFFER       0x3C00
2143 #define COMPENSATE_HALF_MPS_NUM 5
2144 #define PRIV_WL_GAP             0x1800
2145
2146         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2147         u32 tc_num = hclge_get_tc_num(hdev);
2148         u32 half_mps = hdev->mps >> 1;
2149         u32 min_rx_priv;
2150         unsigned int i;
2151
2152         if (tc_num)
2153                 rx_priv = rx_priv / tc_num;
2154
2155         if (tc_num <= NEED_RESERVE_TC_NUM)
2156                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2157
2158         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2159                         COMPENSATE_HALF_MPS_NUM * half_mps;
2160         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2161         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2162
2163         if (rx_priv < min_rx_priv)
2164                 return false;
2165
2166         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2167                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2168
2169                 priv->enable = 0;
2170                 priv->wl.low = 0;
2171                 priv->wl.high = 0;
2172                 priv->buf_size = 0;
2173
2174                 if (!(hdev->hw_tc_map & BIT(i)))
2175                         continue;
2176
2177                 priv->enable = 1;
2178                 priv->buf_size = rx_priv;
2179                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2180                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2181         }
2182
2183         buf_alloc->s_buf.buf_size = 0;
2184
2185         return true;
2186 }
2187
2188 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2189  * @hdev: pointer to struct hclge_dev
2190  * @buf_alloc: pointer to buffer calculation data
2191  * @return: 0: calculation successful, negative: fail
2192  */
2193 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2194                                 struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196         /* When DCB is not supported, rx private buffer is not allocated. */
2197         if (!hnae3_dev_dcb_supported(hdev)) {
2198                 u32 rx_all = hdev->pkt_buf_size;
2199
2200                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2201                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2202                         return -ENOMEM;
2203
2204                 return 0;
2205         }
2206
2207         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2208                 return 0;
2209
2210         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2211                 return 0;
2212
2213         /* try to decrease the buffer size */
2214         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2215                 return 0;
2216
2217         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2218                 return 0;
2219
2220         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2221                 return 0;
2222
2223         return -ENOMEM;
2224 }
2225
2226 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2227                                    struct hclge_pkt_buf_alloc *buf_alloc)
2228 {
2229         struct hclge_rx_priv_buff_cmd *req;
2230         struct hclge_desc desc;
2231         int ret;
2232         int i;
2233
2234         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2235         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2236
2237         /* Alloc private buffer TCs */
2238         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2239                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2240
2241                 req->buf_num[i] =
2242                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2243                 req->buf_num[i] |=
2244                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2245         }
2246
2247         req->shared_buf =
2248                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2249                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2250
2251         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2252         if (ret)
2253                 dev_err(&hdev->pdev->dev,
2254                         "rx private buffer alloc cmd failed %d\n", ret);
2255
2256         return ret;
2257 }
2258
2259 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2260                                    struct hclge_pkt_buf_alloc *buf_alloc)
2261 {
2262         struct hclge_rx_priv_wl_buf *req;
2263         struct hclge_priv_buf *priv;
2264         struct hclge_desc desc[2];
2265         int i, j;
2266         int ret;
2267
2268         for (i = 0; i < 2; i++) {
2269                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2270                                            false);
2271                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2272
2273                 /* The first descriptor sets the NEXT bit to 1 */
2274                 if (i == 0)
2275                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2276                 else
2277                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2278
2279                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2280                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2281
2282                         priv = &buf_alloc->priv_buf[idx];
2283                         req->tc_wl[j].high =
2284                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2285                         req->tc_wl[j].high |=
2286                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2287                         req->tc_wl[j].low =
2288                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2289                         req->tc_wl[j].low |=
2290                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2291                 }
2292         }
2293
2294         /* Send 2 descriptors at one time */
2295         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2296         if (ret)
2297                 dev_err(&hdev->pdev->dev,
2298                         "rx private waterline config cmd failed %d\n",
2299                         ret);
2300         return ret;
2301 }
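
/* Illustrative sketch, not part of the driver: the watermark encoding used
 * in hclge_rx_priv_wl_config() above. Values are right-shifted by
 * HCLGE_BUF_UNIT_S (i.e. expressed in hardware buffer units) with the
 * private-buffer enable bit set. The helper name is hypothetical.
 */
static inline __le16 hclge_example_wl_word(u32 waterline)
{
        return cpu_to_le16((waterline >> HCLGE_BUF_UNIT_S) |
                           BIT(HCLGE_RX_PRIV_EN_B));
}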
2302
2303 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2304                                     struct hclge_pkt_buf_alloc *buf_alloc)
2305 {
2306         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2307         struct hclge_rx_com_thrd *req;
2308         struct hclge_desc desc[2];
2309         struct hclge_tc_thrd *tc;
2310         int i, j;
2311         int ret;
2312
2313         for (i = 0; i < 2; i++) {
2314                 hclge_cmd_setup_basic_desc(&desc[i],
2315                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2316                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2317
2318                 /* The first descriptor sets the NEXT bit to 1 */
2319                 if (i == 0)
2320                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2321                 else
2322                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2323
2324                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2325                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2326
2327                         req->com_thrd[j].high =
2328                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2329                         req->com_thrd[j].high |=
2330                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331                         req->com_thrd[j].low =
2332                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2333                         req->com_thrd[j].low |=
2334                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2335                 }
2336         }
2337
2338         /* Send 2 descriptors at one time */
2339         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2340         if (ret)
2341                 dev_err(&hdev->pdev->dev,
2342                         "common threshold config cmd failed %d\n", ret);
2343         return ret;
2344 }
2345
2346 static int hclge_common_wl_config(struct hclge_dev *hdev,
2347                                   struct hclge_pkt_buf_alloc *buf_alloc)
2348 {
2349         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2350         struct hclge_rx_com_wl *req;
2351         struct hclge_desc desc;
2352         int ret;
2353
2354         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2355
2356         req = (struct hclge_rx_com_wl *)desc.data;
2357         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2358         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2359
2360         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2361         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2362
2363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2364         if (ret)
2365                 dev_err(&hdev->pdev->dev,
2366                         "common waterline config cmd failed %d\n", ret);
2367
2368         return ret;
2369 }
2370
2371 int hclge_buffer_alloc(struct hclge_dev *hdev)
2372 {
2373         struct hclge_pkt_buf_alloc *pkt_buf;
2374         int ret;
2375
2376         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2377         if (!pkt_buf)
2378                 return -ENOMEM;
2379
2380         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2381         if (ret) {
2382                 dev_err(&hdev->pdev->dev,
2383                         "could not calc tx buffer size for all TCs %d\n", ret);
2384                 goto out;
2385         }
2386
2387         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2388         if (ret) {
2389                 dev_err(&hdev->pdev->dev,
2390                         "could not alloc tx buffers %d\n", ret);
2391                 goto out;
2392         }
2393
2394         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2395         if (ret) {
2396                 dev_err(&hdev->pdev->dev,
2397                         "could not calc rx priv buffer size for all TCs %d\n",
2398                         ret);
2399                 goto out;
2400         }
2401
2402         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2403         if (ret) {
2404                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2405                         ret);
2406                 goto out;
2407         }
2408
2409         if (hnae3_dev_dcb_supported(hdev)) {
2410                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2411                 if (ret) {
2412                         dev_err(&hdev->pdev->dev,
2413                                 "could not configure rx private waterline %d\n",
2414                                 ret);
2415                         goto out;
2416                 }
2417
2418                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2419                 if (ret) {
2420                         dev_err(&hdev->pdev->dev,
2421                                 "could not configure common threshold %d\n",
2422                                 ret);
2423                         goto out;
2424                 }
2425         }
2426
2427         ret = hclge_common_wl_config(hdev, pkt_buf);
2428         if (ret)
2429                 dev_err(&hdev->pdev->dev,
2430                         "could not configure common waterline %d\n", ret);
2431
2432 out:
2433         kfree(pkt_buf);
2434         return ret;
2435 }
2436
2437 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2438 {
2439         struct hnae3_handle *roce = &vport->roce;
2440         struct hnae3_handle *nic = &vport->nic;
2441         struct hclge_dev *hdev = vport->back;
2442
2443         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2444
2445         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2446                 return -EINVAL;
2447
2448         roce->rinfo.base_vector = hdev->roce_base_vector;
2449
2450         roce->rinfo.netdev = nic->kinfo.netdev;
2451         roce->rinfo.roce_io_base = hdev->hw.io_base;
2452         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2453
2454         roce->pdev = nic->pdev;
2455         roce->ae_algo = nic->ae_algo;
2456         roce->numa_node_mask = nic->numa_node_mask;
2457
2458         return 0;
2459 }
2460
2461 static int hclge_init_msi(struct hclge_dev *hdev)
2462 {
2463         struct pci_dev *pdev = hdev->pdev;
2464         int vectors;
2465         int i;
2466
2467         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2468                                         hdev->num_msi,
2469                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2470         if (vectors < 0) {
2471                 dev_err(&pdev->dev,
2472                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2473                         vectors);
2474                 return vectors;
2475         }
2476         if (vectors < hdev->num_msi)
2477                 dev_warn(&hdev->pdev->dev,
2478                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2479                          hdev->num_msi, vectors);
2480
2481         hdev->num_msi = vectors;
2482         hdev->num_msi_left = vectors;
2483
2484         hdev->base_msi_vector = pdev->irq;
2485         hdev->roce_base_vector = hdev->base_msi_vector +
2486                                 hdev->num_nic_msi;
2487
2488         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2489                                            sizeof(u16), GFP_KERNEL);
2490         if (!hdev->vector_status) {
2491                 pci_free_irq_vectors(pdev);
2492                 return -ENOMEM;
2493         }
2494
2495         for (i = 0; i < hdev->num_msi; i++)
2496                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2497
2498         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2499                                         sizeof(int), GFP_KERNEL);
2500         if (!hdev->vector_irq) {
2501                 pci_free_irq_vectors(pdev);
2502                 return -ENOMEM;
2503         }
2504
2505         return 0;
2506 }
2507
2508 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2509 {
2510         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2511                 duplex = HCLGE_MAC_FULL;
2512
2513         return duplex;
2514 }
2515
2516 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2517                                       u8 duplex)
2518 {
2519         struct hclge_config_mac_speed_dup_cmd *req;
2520         struct hclge_desc desc;
2521         int ret;
2522
2523         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2524
2525         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2526
2527         if (duplex)
2528                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2529
2530         switch (speed) {
2531         case HCLGE_MAC_SPEED_10M:
2532                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2533                                 HCLGE_CFG_SPEED_S, 6);
2534                 break;
2535         case HCLGE_MAC_SPEED_100M:
2536                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2537                                 HCLGE_CFG_SPEED_S, 7);
2538                 break;
2539         case HCLGE_MAC_SPEED_1G:
2540                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541                                 HCLGE_CFG_SPEED_S, 0);
2542                 break;
2543         case HCLGE_MAC_SPEED_10G:
2544                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545                                 HCLGE_CFG_SPEED_S, 1);
2546                 break;
2547         case HCLGE_MAC_SPEED_25G:
2548                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549                                 HCLGE_CFG_SPEED_S, 2);
2550                 break;
2551         case HCLGE_MAC_SPEED_40G:
2552                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553                                 HCLGE_CFG_SPEED_S, 3);
2554                 break;
2555         case HCLGE_MAC_SPEED_50G:
2556                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557                                 HCLGE_CFG_SPEED_S, 4);
2558                 break;
2559         case HCLGE_MAC_SPEED_100G:
2560                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561                                 HCLGE_CFG_SPEED_S, 5);
2562                 break;
2563         case HCLGE_MAC_SPEED_200G:
2564                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565                                 HCLGE_CFG_SPEED_S, 8);
2566                 break;
2567         default:
2568                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2569                 return -EINVAL;
2570         }
2571
2572         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2573                       1);
2574
2575         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576         if (ret) {
2577                 dev_err(&hdev->pdev->dev,
2578                         "mac speed/duplex config cmd failed %d.\n", ret);
2579                 return ret;
2580         }
2581
2582         return 0;
2583 }
2584
2585 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2586 {
2587         struct hclge_mac *mac = &hdev->hw.mac;
2588         int ret;
2589
2590         duplex = hclge_check_speed_dup(duplex, speed);
2591         if (!mac->support_autoneg && mac->speed == speed &&
2592             mac->duplex == duplex)
2593                 return 0;
2594
2595         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2596         if (ret)
2597                 return ret;
2598
2599         hdev->hw.mac.speed = speed;
2600         hdev->hw.mac.duplex = duplex;
2601
2602         return 0;
2603 }
2604
2605 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2606                                      u8 duplex)
2607 {
2608         struct hclge_vport *vport = hclge_get_vport(handle);
2609         struct hclge_dev *hdev = vport->back;
2610
2611         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2612 }
2613
2614 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2615 {
2616         struct hclge_config_auto_neg_cmd *req;
2617         struct hclge_desc desc;
2618         u32 flag = 0;
2619         int ret;
2620
2621         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2622
2623         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2624         if (enable)
2625                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2626         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2627
2628         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629         if (ret)
2630                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2631                         ret);
2632
2633         return ret;
2634 }
2635
2636 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2637 {
2638         struct hclge_vport *vport = hclge_get_vport(handle);
2639         struct hclge_dev *hdev = vport->back;
2640
2641         if (!hdev->hw.mac.support_autoneg) {
2642                 if (enable) {
2643                         dev_err(&hdev->pdev->dev,
2644                                 "autoneg is not supported by current port\n");
2645                         return -EOPNOTSUPP;
2646                 } else {
2647                         return 0;
2648                 }
2649         }
2650
2651         return hclge_set_autoneg_en(hdev, enable);
2652 }
2653
2654 static int hclge_get_autoneg(struct hnae3_handle *handle)
2655 {
2656         struct hclge_vport *vport = hclge_get_vport(handle);
2657         struct hclge_dev *hdev = vport->back;
2658         struct phy_device *phydev = hdev->hw.mac.phydev;
2659
2660         if (phydev)
2661                 return phydev->autoneg;
2662
2663         return hdev->hw.mac.autoneg;
2664 }
2665
2666 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2667 {
2668         struct hclge_vport *vport = hclge_get_vport(handle);
2669         struct hclge_dev *hdev = vport->back;
2670         int ret;
2671
2672         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2673
2674         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2675         if (ret)
2676                 return ret;
2677         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2678 }
2679
2680 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2681 {
2682         struct hclge_vport *vport = hclge_get_vport(handle);
2683         struct hclge_dev *hdev = vport->back;
2684
2685         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2686                 return hclge_set_autoneg_en(hdev, !halt);
2687
2688         return 0;
2689 }
2690
2691 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2692 {
2693         struct hclge_config_fec_cmd *req;
2694         struct hclge_desc desc;
2695         int ret;
2696
2697         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2698
2699         req = (struct hclge_config_fec_cmd *)desc.data;
2700         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2701                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2702         if (fec_mode & BIT(HNAE3_FEC_RS))
2703                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2704                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2705         if (fec_mode & BIT(HNAE3_FEC_BASER))
2706                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2707                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2708
2709         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2710         if (ret)
2711                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2712
2713         return ret;
2714 }
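
/* Illustrative sketch, not part of the driver: the FEC register image
 * produced by hclge_set_fec_hw() above for each user-requested mode, shown
 * as a pure helper over the same bit fields. The helper name is
 * hypothetical.
 */
static inline u8 hclge_example_fec_reg(u32 fec_mode)
{
        u8 reg = 0;

        if (fec_mode & BIT(HNAE3_FEC_AUTO))
                hnae3_set_bit(reg, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
        if (fec_mode & BIT(HNAE3_FEC_RS))
                hnae3_set_field(reg, HCLGE_MAC_CFG_FEC_MODE_M,
                                HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
        if (fec_mode & BIT(HNAE3_FEC_BASER))
                hnae3_set_field(reg, HCLGE_MAC_CFG_FEC_MODE_M,
                                HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);

        return reg;
}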
2715
2716 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2717 {
2718         struct hclge_vport *vport = hclge_get_vport(handle);
2719         struct hclge_dev *hdev = vport->back;
2720         struct hclge_mac *mac = &hdev->hw.mac;
2721         int ret;
2722
2723         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2724                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2725                 return -EINVAL;
2726         }
2727
2728         ret = hclge_set_fec_hw(hdev, fec_mode);
2729         if (ret)
2730                 return ret;
2731
2732         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2733         return 0;
2734 }
2735
2736 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2737                           u8 *fec_mode)
2738 {
2739         struct hclge_vport *vport = hclge_get_vport(handle);
2740         struct hclge_dev *hdev = vport->back;
2741         struct hclge_mac *mac = &hdev->hw.mac;
2742
2743         if (fec_ability)
2744                 *fec_ability = mac->fec_ability;
2745         if (fec_mode)
2746                 *fec_mode = mac->fec_mode;
2747 }
2748
2749 static int hclge_mac_init(struct hclge_dev *hdev)
2750 {
2751         struct hclge_mac *mac = &hdev->hw.mac;
2752         int ret;
2753
2754         hdev->support_sfp_query = true;
2755         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2756         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2757                                          hdev->hw.mac.duplex);
2758         if (ret)
2759                 return ret;
2760
2761         if (hdev->hw.mac.support_autoneg) {
2762                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2763                 if (ret)
2764                         return ret;
2765         }
2766
2767         mac->link = 0;
2768
2769         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2770                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2771                 if (ret)
2772                         return ret;
2773         }
2774
2775         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2776         if (ret) {
2777                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2778                 return ret;
2779         }
2780
2781         ret = hclge_set_default_loopback(hdev);
2782         if (ret)
2783                 return ret;
2784
2785         ret = hclge_buffer_alloc(hdev);
2786         if (ret)
2787                 dev_err(&hdev->pdev->dev,
2788                         "allocate buffer failed, ret=%d\n", ret);
2789
2790         return ret;
2791 }
2792
2793 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2794 {
2795         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2796             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2797                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2798                                     hclge_wq, &hdev->service_task, 0);
2799 }
2800
2801 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2802 {
2803         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2805                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806                                     hclge_wq, &hdev->service_task, 0);
2807 }
2808
2809 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2810 {
2811         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2813                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2814                                     hclge_wq, &hdev->service_task,
2815                                     delay_time);
2816 }
2817
2818 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2819 {
2820         struct hclge_link_status_cmd *req;
2821         struct hclge_desc desc;
2822         int ret;
2823
2824         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2825         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2826         if (ret) {
2827                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2828                         ret);
2829                 return ret;
2830         }
2831
2832         req = (struct hclge_link_status_cmd *)desc.data;
2833         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2834                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2835
2836         return 0;
2837 }
2838
2839 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2840 {
2841         struct phy_device *phydev = hdev->hw.mac.phydev;
2842
2843         *link_status = HCLGE_LINK_STATUS_DOWN;
2844
2845         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2846                 return 0;
2847
2848         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2849                 return 0;
2850
2851         return hclge_get_mac_link_status(hdev, link_status);
2852 }
2853
2854 static void hclge_update_link_status(struct hclge_dev *hdev)
2855 {
2856         struct hnae3_client *rclient = hdev->roce_client;
2857         struct hnae3_client *client = hdev->nic_client;
2858         struct hnae3_handle *rhandle;
2859         struct hnae3_handle *handle;
2860         int state;
2861         int ret;
2862         int i;
2863
2864         if (!client)
2865                 return;
2866
2867         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2868                 return;
2869
2870         ret = hclge_get_mac_phy_link(hdev, &state);
2871         if (ret) {
2872                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2873                 return;
2874         }
2875
2876         if (state != hdev->hw.mac.link) {
2877                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2878                         handle = &hdev->vport[i].nic;
2879                         client->ops->link_status_change(handle, state);
2880                         hclge_config_mac_tnl_int(hdev, state);
2881                         rhandle = &hdev->vport[i].roce;
2882                         if (rclient && rclient->ops->link_status_change)
2883                                 rclient->ops->link_status_change(rhandle,
2884                                                                  state);
2885                 }
2886                 hdev->hw.mac.link = state;
2887         }
2888
2889         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2890 }
2891
2892 static void hclge_update_port_capability(struct hclge_dev *hdev,
2893                                          struct hclge_mac *mac)
2894 {
2895         if (hnae3_dev_fec_supported(hdev))
2896                 /* update fec ability by speed */
2897                 hclge_convert_setting_fec(mac);
2898
2899         /* firmware cannot identify the backplane type, so the media type
2900          * read from the configuration is used to help handle it
2901          */
2902         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2903             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2904                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2905         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2906                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2907
2908         if (mac->support_autoneg) {
2909                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2910                 linkmode_copy(mac->advertising, mac->supported);
2911         } else {
2912                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2913                                    mac->supported);
2914                 linkmode_zero(mac->advertising);
2915         }
2916 }
2917
2918 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2919 {
2920         struct hclge_sfp_info_cmd *resp;
2921         struct hclge_desc desc;
2922         int ret;
2923
2924         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2925         resp = (struct hclge_sfp_info_cmd *)desc.data;
2926         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2927         if (ret == -EOPNOTSUPP) {
2928                 dev_warn(&hdev->pdev->dev,
2929                          "IMP does not support get SFP speed %d\n", ret);
2930                 return ret;
2931         } else if (ret) {
2932                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2933                 return ret;
2934         }
2935
2936         *speed = le32_to_cpu(resp->speed);
2937
2938         return 0;
2939 }
2940
2941 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2942 {
2943         struct hclge_sfp_info_cmd *resp;
2944         struct hclge_desc desc;
2945         int ret;
2946
2947         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2948         resp = (struct hclge_sfp_info_cmd *)desc.data;
2949
2950         resp->query_type = QUERY_ACTIVE_SPEED;
2951
2952         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2953         if (ret == -EOPNOTSUPP) {
2954                 dev_warn(&hdev->pdev->dev,
2955                          "IMP does not support get SFP info %d\n", ret);
2956                 return ret;
2957         } else if (ret) {
2958                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2959                 return ret;
2960         }
2961
2962         /* In some cases, the mac speed queried from IMP may be 0; it
2963          * shouldn't be written to mac->speed.
2964          */
2965         if (!le32_to_cpu(resp->speed))
2966                 return 0;
2967
2968         mac->speed = le32_to_cpu(resp->speed);
2969         /* if resp->speed_ability is 0, it means the firmware is an old
2970          * version that does not report these fields, so do not update them
2971          */
2972         if (resp->speed_ability) {
2973                 mac->module_type = le32_to_cpu(resp->module_type);
2974                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2975                 mac->autoneg = resp->autoneg;
2976                 mac->support_autoneg = resp->autoneg_ability;
2977                 mac->speed_type = QUERY_ACTIVE_SPEED;
2978                 if (!resp->active_fec)
2979                         mac->fec_mode = 0;
2980                 else
2981                         mac->fec_mode = BIT(resp->active_fec);
2982         } else {
2983                 mac->speed_type = QUERY_SFP_SPEED;
2984         }
2985
2986         return 0;
2987 }
2988
2989 static int hclge_update_port_info(struct hclge_dev *hdev)
2990 {
2991         struct hclge_mac *mac = &hdev->hw.mac;
2992         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2993         int ret;
2994
2995         /* get the port info from SFP cmd if not copper port */
2996         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2997                 return 0;
2998
2999         /* if IMP does not support getting SFP/qSFP info, return directly */
3000         if (!hdev->support_sfp_query)
3001                 return 0;
3002
3003         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3004                 ret = hclge_get_sfp_info(hdev, mac);
3005         else
3006                 ret = hclge_get_sfp_speed(hdev, &speed);
3007
3008         if (ret == -EOPNOTSUPP) {
3009                 hdev->support_sfp_query = false;
3010                 return ret;
3011         } else if (ret) {
3012                 return ret;
3013         }
3014
3015         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3016                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3017                         hclge_update_port_capability(hdev, mac);
3018                         return 0;
3019                 }
3020                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3021                                                HCLGE_MAC_FULL);
3022         } else {
3023                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3024                         return 0; /* do nothing if no SFP */
3025
3026                 /* must config full duplex for SFP */
3027                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3028         }
3029 }
3030
3031 static int hclge_get_status(struct hnae3_handle *handle)
3032 {
3033         struct hclge_vport *vport = hclge_get_vport(handle);
3034         struct hclge_dev *hdev = vport->back;
3035
3036         hclge_update_link_status(hdev);
3037
3038         return hdev->hw.mac.link;
3039 }
3040
3041 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3042 {
3043         if (!pci_num_vf(hdev->pdev)) {
3044                 dev_err(&hdev->pdev->dev,
3045                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3046                 return NULL;
3047         }
3048
3049         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3050                 dev_err(&hdev->pdev->dev,
3051                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3052                         vf, pci_num_vf(hdev->pdev));
3053                 return NULL;
3054         }
3055
3056         /* VFs start from index 1 in the vport array */
3057         vf += HCLGE_VF_VPORT_START_NUM;
3058         return &hdev->vport[vf];
3059 }
3060
3061 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3062                                struct ifla_vf_info *ivf)
3063 {
3064         struct hclge_vport *vport = hclge_get_vport(handle);
3065         struct hclge_dev *hdev = vport->back;
3066
3067         vport = hclge_get_vf_vport(hdev, vf);
3068         if (!vport)
3069                 return -EINVAL;
3070
3071         ivf->vf = vf;
3072         ivf->linkstate = vport->vf_info.link_state;
3073         ivf->spoofchk = vport->vf_info.spoofchk;
3074         ivf->trusted = vport->vf_info.trusted;
3075         ivf->min_tx_rate = 0;
3076         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3077         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3078         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3079         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3080         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3081
3082         return 0;
3083 }
3084
3085 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3086                                    int link_state)
3087 {
3088         struct hclge_vport *vport = hclge_get_vport(handle);
3089         struct hclge_dev *hdev = vport->back;
3090
3091         vport = hclge_get_vf_vport(hdev, vf);
3092         if (!vport)
3093                 return -EINVAL;
3094
3095         vport->vf_info.link_state = link_state;
3096
3097         return 0;
3098 }
3099
3100 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3101 {
3102         u32 cmdq_src_reg, msix_src_reg;
3103
3104         /* fetch the events from their corresponding regs */
3105         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3106         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3107
3108         /* Assumption: if by any chance reset and mailbox events are reported
3109          * together, then we will only process the reset event in this pass and
3110          * will defer the processing of the mailbox events. Since we will not
3111          * have cleared the RX CMDQ event this time, we will receive another
3112          * interrupt from the H/W just for the mailbox.
3113          *
3114          * check for vector0 reset event sources
3115          */
3116         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3117                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3118                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3119                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3120                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3121                 hdev->rst_stats.imp_rst_cnt++;
3122                 return HCLGE_VECTOR0_EVENT_RST;
3123         }
3124
3125         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3126                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3127                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3128                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3129                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3130                 hdev->rst_stats.global_rst_cnt++;
3131                 return HCLGE_VECTOR0_EVENT_RST;
3132         }
3133
3134         /* check for vector0 msix event source */
3135         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3136                 *clearval = msix_src_reg;
3137                 return HCLGE_VECTOR0_EVENT_ERR;
3138         }
3139
3140         /* check for vector0 mailbox(=CMDQ RX) event source */
3141         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3142                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3143                 *clearval = cmdq_src_reg;
3144                 return HCLGE_VECTOR0_EVENT_MBX;
3145         }
3146
3147         /* print other vector0 event source */
3148         dev_info(&hdev->pdev->dev,
3149                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3150                  cmdq_src_reg, msix_src_reg);
3151         *clearval = msix_src_reg;
3152
3153         return HCLGE_VECTOR0_EVENT_OTHER;
3154 }
3155
3156 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3157                                     u32 regclr)
3158 {
3159         switch (event_type) {
3160         case HCLGE_VECTOR0_EVENT_RST:
3161                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3162                 break;
3163         case HCLGE_VECTOR0_EVENT_MBX:
3164                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3165                 break;
3166         default:
3167                 break;
3168         }
3169 }
3170
3171 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3172 {
3173         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3174                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3175                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3176                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3177         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3178 }
3179
3180 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3181 {
3182         writel(enable ? 1 : 0, vector->addr);
3183 }
3184
3185 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3186 {
3187         struct hclge_dev *hdev = data;
3188         u32 clearval = 0;
3189         u32 event_cause;
3190
3191         hclge_enable_vector(&hdev->misc_vector, false);
3192         event_cause = hclge_check_event_cause(hdev, &clearval);
3193
3194         /* vector 0 interrupt is shared with reset and mailbox source events. */
3195         switch (event_cause) {
3196         case HCLGE_VECTOR0_EVENT_ERR:
3197                 /* we do not know what type of reset is required now. This can
3198                  * only be decided after we fetch the type of errors which
3199                  * caused this event. Therefore, we will do the following for now:
3200                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3201                  *    have deferred the choice of reset type to be used.
3202                  * 2. Schedule the reset service task.
3203                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type, it
3204                  *    will fetch the correct type of reset. This is done by
3205                  *    first decoding the types of errors.
3206                  */
3207                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3208                 fallthrough;
3209         case HCLGE_VECTOR0_EVENT_RST:
3210                 hclge_reset_task_schedule(hdev);
3211                 break;
3212         case HCLGE_VECTOR0_EVENT_MBX:
3213                 /* If we are here, then either:
3214                  * 1. we are not handling any mbx task and no mbx task is
3215                  *    scheduled either,
3216                  *                        OR
3217                  * 2. we could be handling an mbx task but nothing more is
3218                  *    scheduled.
3219                  * In both cases, we should schedule the mbx task, as there are
3220                  * more mbx messages reported by this interrupt.
3221                  */
3222                 hclge_mbx_task_schedule(hdev);
3223                 break;
3224         default:
3225                 dev_warn(&hdev->pdev->dev,
3226                          "received unknown or unhandled event of vector0\n");
3227                 break;
3228         }
3229
3230         hclge_clear_event_cause(hdev, event_cause, clearval);
3231
3232         /* Enable the interrupt if it is not caused by reset. When
3233          * clearval equals 0, it means the interrupt status may have been
3234          * cleared by hardware before the driver read the status register.
3235          * For this case, the vector0 interrupt should also be enabled.
3236          */
3237         if (!clearval ||
3238             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3239                 hclge_enable_vector(&hdev->misc_vector, true);
3240         }
3241
3242         return IRQ_HANDLED;
3243 }
3244
3245 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3246 {
3247         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3248                 dev_warn(&hdev->pdev->dev,
3249                          "vector(vector_id %d) has been freed.\n", vector_id);
3250                 return;
3251         }
3252
3253         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3254         hdev->num_msi_left += 1;
3255         hdev->num_msi_used -= 1;
3256 }
3257
3258 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3259 {
3260         struct hclge_misc_vector *vector = &hdev->misc_vector;
3261
3262         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3263
3264         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3265         hdev->vector_status[0] = 0;
3266
3267         hdev->num_msi_left -= 1;
3268         hdev->num_msi_used += 1;
3269 }
3270
3271 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3272                                       const cpumask_t *mask)
3273 {
3274         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3275                                               affinity_notify);
3276
3277         cpumask_copy(&hdev->affinity_mask, mask);
3278 }
3279
3280 static void hclge_irq_affinity_release(struct kref *ref)
3281 {
3282 }
3283
3284 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3285 {
3286         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3287                               &hdev->affinity_mask);
3288
3289         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3290         hdev->affinity_notify.release = hclge_irq_affinity_release;
3291         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3292                                   &hdev->affinity_notify);
3293 }
3294
3295 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3296 {
3297         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3298         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3299 }
3300
3301 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3302 {
3303         int ret;
3304
3305         hclge_get_misc_vector(hdev);
3306
3307         /* this irq is explicitly freed in hclge_misc_irq_uninit() */
3308         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3309                  HCLGE_NAME, pci_name(hdev->pdev));
3310         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3311                           0, hdev->misc_vector.name, hdev);
3312         if (ret) {
3313                 hclge_free_vector(hdev, 0);
3314                 dev_err(&hdev->pdev->dev, "request misc irq(%d) failed\n",
3315                         hdev->misc_vector.vector_irq);
3316         }
3317
3318         return ret;
3319 }
3320
3321 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3322 {
3323         free_irq(hdev->misc_vector.vector_irq, hdev);
3324         hclge_free_vector(hdev, 0);
3325 }
3326
3327 int hclge_notify_client(struct hclge_dev *hdev,
3328                         enum hnae3_reset_notify_type type)
3329 {
3330         struct hnae3_client *client = hdev->nic_client;
3331         u16 i;
3332
3333         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3334                 return 0;
3335
3336         if (!client->ops->reset_notify)
3337                 return -EOPNOTSUPP;
3338
3339         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3340                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3341                 int ret;
3342
3343                 ret = client->ops->reset_notify(handle, type);
3344                 if (ret) {
3345                         dev_err(&hdev->pdev->dev,
3346                                 "notify nic client failed %d(%d)\n", type, ret);
3347                         return ret;
3348                 }
3349         }
3350
3351         return 0;
3352 }
3353
3354 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3355                                     enum hnae3_reset_notify_type type)
3356 {
3357         struct hnae3_client *client = hdev->roce_client;
3358         int ret;
3359         u16 i;
3360
3361         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3362                 return 0;
3363
3364         if (!client->ops->reset_notify)
3365                 return -EOPNOTSUPP;
3366
3367         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3368                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3369
3370                 ret = client->ops->reset_notify(handle, type);
3371                 if (ret) {
3372                         dev_err(&hdev->pdev->dev,
3373                                 "notify roce client failed %d(%d)",
3374                                 type, ret);
3375                         return ret;
3376                 }
3377         }
3378
3379         return ret;
3380 }
3381
3382 static int hclge_reset_wait(struct hclge_dev *hdev)
3383 {
3384 #define HCLGE_RESET_WAIT_MS     100
3385 #define HCLGE_RESET_WAIT_CNT    350
3386
3387         u32 val, reg, reg_bit;
3388         u32 cnt = 0;
3389
3390         switch (hdev->reset_type) {
3391         case HNAE3_IMP_RESET:
3392                 reg = HCLGE_GLOBAL_RESET_REG;
3393                 reg_bit = HCLGE_IMP_RESET_BIT;
3394                 break;
3395         case HNAE3_GLOBAL_RESET:
3396                 reg = HCLGE_GLOBAL_RESET_REG;
3397                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3398                 break;
3399         case HNAE3_FUNC_RESET:
3400                 reg = HCLGE_FUN_RST_ING;
3401                 reg_bit = HCLGE_FUN_RST_ING_B;
3402                 break;
3403         default:
3404                 dev_err(&hdev->pdev->dev,
3405                         "Wait for unsupported reset type: %d\n",
3406                         hdev->reset_type);
3407                 return -EINVAL;
3408         }
3409
3410         val = hclge_read_dev(&hdev->hw, reg);
3411         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3412                 msleep(HCLGE_RESET_WAIT_MS);
3413                 val = hclge_read_dev(&hdev->hw, reg);
3414                 cnt++;
3415         }
3416
3417         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3418                 dev_warn(&hdev->pdev->dev,
3419                          "Wait for reset timeout: %d\n", hdev->reset_type);
3420                 return -EBUSY;
3421         }
3422
3423         return 0;
3424 }
3425
3426 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3427 {
3428         struct hclge_vf_rst_cmd *req;
3429         struct hclge_desc desc;
3430
3431         req = (struct hclge_vf_rst_cmd *)desc.data;
3432         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3433         req->dest_vfid = func_id;
3434
3435         if (reset)
3436                 req->vf_rst = 0x1;
3437
3438         return hclge_cmd_send(&hdev->hw, &desc, 1);
3439 }
3440
3441 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3442 {
3443         int i;
3444
3445         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3446                 struct hclge_vport *vport = &hdev->vport[i];
3447                 int ret;
3448
3449                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3450                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3451                 if (ret) {
3452                         dev_err(&hdev->pdev->dev,
3453                                 "set vf(%u) rst failed %d!\n",
3454                                 vport->vport_id, ret);
3455                         return ret;
3456                 }
3457
3458                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3459                         continue;
3460
3461                 /* Inform the VF to process the reset.
3462                  * hclge_inform_reset_assert_to_vf may fail if the VF
3463                  * driver is not loaded.
3464                  */
3465                 ret = hclge_inform_reset_assert_to_vf(vport);
3466                 if (ret)
3467                         dev_warn(&hdev->pdev->dev,
3468                                  "inform reset to vf(%u) failed %d!\n",
3469                                  vport->vport_id, ret);
3470         }
3471
3472         return 0;
3473 }
3474
3475 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3476 {
3477         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3478             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3479             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3480                 return;
3481
3482         hclge_mbx_handler(hdev);
3483
3484         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3485 }
3486
3487 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3488 {
3489         struct hclge_pf_rst_sync_cmd *req;
3490         struct hclge_desc desc;
3491         int cnt = 0;
3492         int ret;
3493
3494         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3495         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3496
3497         do {
3498                 /* the VF needs to bring down its netdev via mbx during PF or FLR reset */
3499                 hclge_mailbox_service_task(hdev);
3500
3501                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3502                 /* for compatibility with old firmware, wait
3503                  * 100 ms for the VF to stop IO
3504                  */
3505                 if (ret == -EOPNOTSUPP) {
3506                         msleep(HCLGE_RESET_SYNC_TIME);
3507                         return;
3508                 } else if (ret) {
3509                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3510                                  ret);
3511                         return;
3512                 } else if (req->all_vf_ready) {
3513                         return;
3514                 }
3515                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3516                 hclge_cmd_reuse_desc(&desc, true);
3517         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3518
3519         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3520 }
3521
3522 void hclge_report_hw_error(struct hclge_dev *hdev,
3523                            enum hnae3_hw_error_type type)
3524 {
3525         struct hnae3_client *client = hdev->nic_client;
3526         u16 i;
3527
3528         if (!client || !client->ops->process_hw_error ||
3529             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3530                 return;
3531
3532         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3533                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3534 }
3535
3536 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3537 {
3538         u32 reg_val;
3539
3540         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3541         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3542                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3543                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3544                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3545         }
3546
3547         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3548                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3549                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3550                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3551         }
3552 }
3553
3554 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3555 {
3556         struct hclge_desc desc;
3557         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3558         int ret;
3559
3560         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3561         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3562         req->fun_reset_vfid = func_id;
3563
3564         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3565         if (ret)
3566                 dev_err(&hdev->pdev->dev,
3567                         "send function reset cmd fail, status =%d\n", ret);
3568
3569         return ret;
3570 }
3571
3572 static void hclge_do_reset(struct hclge_dev *hdev)
3573 {
3574         struct hnae3_handle *handle = &hdev->vport[0].nic;
3575         struct pci_dev *pdev = hdev->pdev;
3576         u32 val;
3577
3578         if (hclge_get_hw_reset_stat(handle)) {
3579                 dev_info(&pdev->dev, "hardware reset not finished\n");
3580                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3581                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3582                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3583                 return;
3584         }
3585
3586         switch (hdev->reset_type) {
3587         case HNAE3_GLOBAL_RESET:
3588                 dev_info(&pdev->dev, "global reset requested\n");
3589                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3590                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3591                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3592                 break;
3593         case HNAE3_FUNC_RESET:
3594                 dev_info(&pdev->dev, "PF reset requested\n");
3595                 /* schedule again to check later */
3596                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3597                 hclge_reset_task_schedule(hdev);
3598                 break;
3599         default:
3600                 dev_warn(&pdev->dev,
3601                          "unsupported reset type: %d\n", hdev->reset_type);
3602                 break;
3603         }
3604 }
3605
3606 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3607                                                    unsigned long *addr)
3608 {
3609         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3610         struct hclge_dev *hdev = ae_dev->priv;
3611
3612         /* first, resolve any unknown reset type to the known type(s) */
3613         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3614                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3615                                         HCLGE_MISC_VECTOR_INT_STS);
3616                 /* we will intentionally ignore any errors from this function
3617                  * as we will end up in *some* reset request in any case
3618                  */
3619                 if (hclge_handle_hw_msix_error(hdev, addr))
3620                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3621                                  msix_sts_reg);
3622
3623                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3624                 /* We deferred the clearing of the error event which caused
3625                  * the interrupt, since it was not possible to do that in
3626                  * interrupt context (and this is the reason we introduced the
3627                  * new UNKNOWN reset type). Now that the errors have been
3628                  * handled and cleared in hardware, we can safely enable
3629                  * interrupts. This is an exception to the norm.
3630                  */
3631                 hclge_enable_vector(&hdev->misc_vector, true);
3632         }
3633
3634         /* return the highest priority reset level amongst all */
3635         if (test_bit(HNAE3_IMP_RESET, addr)) {
3636                 rst_level = HNAE3_IMP_RESET;
3637                 clear_bit(HNAE3_IMP_RESET, addr);
3638                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3639                 clear_bit(HNAE3_FUNC_RESET, addr);
3640         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3641                 rst_level = HNAE3_GLOBAL_RESET;
3642                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3643                 clear_bit(HNAE3_FUNC_RESET, addr);
3644         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3645                 rst_level = HNAE3_FUNC_RESET;
3646                 clear_bit(HNAE3_FUNC_RESET, addr);
3647         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3648                 rst_level = HNAE3_FLR_RESET;
3649                 clear_bit(HNAE3_FLR_RESET, addr);
3650         }
3651
3652         if (hdev->reset_type != HNAE3_NONE_RESET &&
3653             rst_level < hdev->reset_type)
3654                 return HNAE3_NONE_RESET;
3655
3656         return rst_level;
3657 }
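
     /* Illustrative sketch, not part of the driver: how the priority resolution
      * above behaves when several resets are pending. The helper name is
      * hypothetical; only the HNAE3_*_RESET bits and hclge_get_reset_level()
      * come from the existing code.
      */
     static void hclge_example_reset_priority(struct hnae3_ae_dev *ae_dev)
     {
             unsigned long pending = 0;

             set_bit(HNAE3_GLOBAL_RESET, &pending);
             set_bit(HNAE3_FUNC_RESET, &pending);

             /* resolves to HNAE3_GLOBAL_RESET (assuming no higher-level reset
              * is already in progress) and clears both pending bits, so the
              * function reset is folded into the global one
              */
             hclge_get_reset_level(ae_dev, &pending);
     }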
3658
3659 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3660 {
3661         u32 clearval = 0;
3662
3663         switch (hdev->reset_type) {
3664         case HNAE3_IMP_RESET:
3665                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3666                 break;
3667         case HNAE3_GLOBAL_RESET:
3668                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3669                 break;
3670         default:
3671                 break;
3672         }
3673
3674         if (!clearval)
3675                 return;
3676
3677         /* For revision 0x20, the reset interrupt source
3678          * can only be cleared after the hardware reset is done
3679          */
3680         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3681                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3682                                 clearval);
3683
3684         hclge_enable_vector(&hdev->misc_vector, true);
3685 }
3686
3687 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3688 {
3689         u32 reg_val;
3690
3691         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3692         if (enable)
3693                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3694         else
3695                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3696
3697         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3698 }
3699
3700 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3701 {
3702         int ret;
3703
3704         ret = hclge_set_all_vf_rst(hdev, true);
3705         if (ret)
3706                 return ret;
3707
3708         hclge_func_reset_sync_vf(hdev);
3709
3710         return 0;
3711 }
3712
3713 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3714 {
3715         u32 reg_val;
3716         int ret = 0;
3717
3718         switch (hdev->reset_type) {
3719         case HNAE3_FUNC_RESET:
3720                 ret = hclge_func_reset_notify_vf(hdev);
3721                 if (ret)
3722                         return ret;
3723
3724                 ret = hclge_func_reset_cmd(hdev, 0);
3725                 if (ret) {
3726                         dev_err(&hdev->pdev->dev,
3727                                 "asserting function reset fail %d!\n", ret);
3728                         return ret;
3729                 }
3730
3731                 /* After performing the PF reset, it is not necessary to do
3732                  * mailbox handling or send any command to the firmware, because
3733                  * any mailbox handling or command to the firmware is only valid
3734                  * after hclge_cmd_init is called.
3735                  */
3736                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3737                 hdev->rst_stats.pf_rst_cnt++;
3738                 break;
3739         case HNAE3_FLR_RESET:
3740                 ret = hclge_func_reset_notify_vf(hdev);
3741                 if (ret)
3742                         return ret;
3743                 break;
3744         case HNAE3_IMP_RESET:
3745                 hclge_handle_imp_error(hdev);
3746                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3747                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3748                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3749                 break;
3750         default:
3751                 break;
3752         }
3753
3754         /* inform hardware that preparatory work is done */
3755         msleep(HCLGE_RESET_SYNC_TIME);
3756         hclge_reset_handshake(hdev, true);
3757         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3758
3759         return ret;
3760 }
3761
3762 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3763 {
3764 #define MAX_RESET_FAIL_CNT 5
3765
3766         if (hdev->reset_pending) {
3767                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3768                          hdev->reset_pending);
3769                 return true;
3770         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3771                    HCLGE_RESET_INT_M) {
3772                 dev_info(&hdev->pdev->dev,
3773                          "reset failed because of a new reset interrupt\n");
3774                 hclge_clear_reset_cause(hdev);
3775                 return false;
3776         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3777                 hdev->rst_stats.reset_fail_cnt++;
3778                 set_bit(hdev->reset_type, &hdev->reset_pending);
3779                 dev_info(&hdev->pdev->dev,
3780                          "re-schedule reset task(%u)\n",
3781                          hdev->rst_stats.reset_fail_cnt);
3782                 return true;
3783         }
3784
3785         hclge_clear_reset_cause(hdev);
3786
3787         /* recover the handshake status when the reset fails */
3788         hclge_reset_handshake(hdev, true);
3789
3790         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3791
3792         hclge_dbg_dump_rst_info(hdev);
3793
3794         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3795
3796         return false;
3797 }
3798
3799 static int hclge_set_rst_done(struct hclge_dev *hdev)
3800 {
3801         struct hclge_pf_rst_done_cmd *req;
3802         struct hclge_desc desc;
3803         int ret;
3804
3805         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3806         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3807         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3808
3809         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3810         /* To be compatible with the old firmware, which does not support
3811          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3812          * return success
3813          */
3814         if (ret == -EOPNOTSUPP) {
3815                 dev_warn(&hdev->pdev->dev,
3816                          "current firmware does not support command(0x%x)!\n",
3817                          HCLGE_OPC_PF_RST_DONE);
3818                 return 0;
3819         } else if (ret) {
3820                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3821                         ret);
3822         }
3823
3824         return ret;
3825 }
3826
3827 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3828 {
3829         int ret = 0;
3830
3831         switch (hdev->reset_type) {
3832         case HNAE3_FUNC_RESET:
3833         case HNAE3_FLR_RESET:
3834                 ret = hclge_set_all_vf_rst(hdev, false);
3835                 break;
3836         case HNAE3_GLOBAL_RESET:
3837         case HNAE3_IMP_RESET:
3838                 ret = hclge_set_rst_done(hdev);
3839                 break;
3840         default:
3841                 break;
3842         }
3843
3844         /* clear the handshake status after re-initialization is done */
3845         hclge_reset_handshake(hdev, false);
3846
3847         return ret;
3848 }
3849
3850 static int hclge_reset_stack(struct hclge_dev *hdev)
3851 {
3852         int ret;
3853
3854         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3855         if (ret)
3856                 return ret;
3857
3858         ret = hclge_reset_ae_dev(hdev->ae_dev);
3859         if (ret)
3860                 return ret;
3861
3862         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3863 }
3864
3865 static int hclge_reset_prepare(struct hclge_dev *hdev)
3866 {
3867         int ret;
3868
3869         hdev->rst_stats.reset_cnt++;
3870         /* perform reset of the stack & ae device for a client */
3871         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3872         if (ret)
3873                 return ret;
3874
3875         rtnl_lock();
3876         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3877         rtnl_unlock();
3878         if (ret)
3879                 return ret;
3880
3881         return hclge_reset_prepare_wait(hdev);
3882 }
3883
3884 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3885 {
3886         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3887         enum hnae3_reset_type reset_level;
3888         int ret;
3889
3890         hdev->rst_stats.hw_reset_done_cnt++;
3891
3892         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3893         if (ret)
3894                 return ret;
3895
3896         rtnl_lock();
3897         ret = hclge_reset_stack(hdev);
3898         rtnl_unlock();
3899         if (ret)
3900                 return ret;
3901
3902         hclge_clear_reset_cause(hdev);
3903
3904         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3905         /* only ignore the RoCE notify error once the reset has already
3906          * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
3907          */
3908         if (ret &&
3909             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3910                 return ret;
3911
3912         ret = hclge_reset_prepare_up(hdev);
3913         if (ret)
3914                 return ret;
3915
3916         rtnl_lock();
3917         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3918         rtnl_unlock();
3919         if (ret)
3920                 return ret;
3921
3922         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3923         if (ret)
3924                 return ret;
3925
3926         hdev->last_reset_time = jiffies;
3927         hdev->rst_stats.reset_fail_cnt = 0;
3928         hdev->rst_stats.reset_done_cnt++;
3929         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3930
3931         /* if default_reset_request has a higher level reset request,
3932          * it should be handled as soon as possible, since some errors
3933          * need this kind of reset to be fixed.
3934          */
3935         reset_level = hclge_get_reset_level(ae_dev,
3936                                             &hdev->default_reset_request);
3937         if (reset_level != HNAE3_NONE_RESET)
3938                 set_bit(reset_level, &hdev->reset_request);
3939
3940         return 0;
3941 }
3942
3943 static void hclge_reset(struct hclge_dev *hdev)
3944 {
3945         if (hclge_reset_prepare(hdev))
3946                 goto err_reset;
3947
3948         if (hclge_reset_wait(hdev))
3949                 goto err_reset;
3950
3951         if (hclge_reset_rebuild(hdev))
3952                 goto err_reset;
3953
3954         return;
3955
3956 err_reset:
3957         if (hclge_reset_err_handle(hdev))
3958                 hclge_reset_task_schedule(hdev);
3959 }
3960
3961 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3962 {
3963         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3964         struct hclge_dev *hdev = ae_dev->priv;
3965
3966         /* We might end up getting called broadly because of the 2 cases below:
3967          * 1. A recoverable error was conveyed through APEI and the only way to
3968          *    bring back normalcy is to reset.
3969          * 2. A new reset request from the stack due to a timeout.
3970          *
3971          * For the first case, the error event might not have an ae handle
3972          * available. Check if this is a new reset request and we are not here
3973          * just because the last reset attempt did not succeed and the watchdog
3974          * hit us again. We will know this if the last reset request did not occur
3975          * very recently (watchdog timer = 5*HZ; let us check after a sufficiently
3976          * large time, say 4*5*HZ). In case of a new request we reset the
3977          * "reset level" to PF reset. And if it is a repeat of the most recent
3978          * reset request, we want to make sure we throttle it. Therefore, we
3979          * will not allow it again before 3*HZ.
3980          */
3981         if (!handle)
3982                 handle = &hdev->vport[0].nic;
3983
3984         if (time_before(jiffies, (hdev->last_reset_time +
3985                                   HCLGE_RESET_INTERVAL))) {
3986                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3987                 return;
3988         } else if (hdev->default_reset_request) {
3989                 hdev->reset_level =
3990                         hclge_get_reset_level(ae_dev,
3991                                               &hdev->default_reset_request);
3992         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3993                 hdev->reset_level = HNAE3_FUNC_RESET;
3994         }
3995
3996         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3997                  hdev->reset_level);
3998
3999         /* request reset & schedule reset task */
4000         set_bit(hdev->reset_level, &hdev->reset_request);
4001         hclge_reset_task_schedule(hdev);
4002
4003         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4004                 hdev->reset_level++;
4005 }
4006
4007 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4008                                         enum hnae3_reset_type rst_type)
4009 {
4010         struct hclge_dev *hdev = ae_dev->priv;
4011
4012         set_bit(rst_type, &hdev->default_reset_request);
4013 }
4014
4015 static void hclge_reset_timer(struct timer_list *t)
4016 {
4017         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4018
4019         /* if default_reset_request has no value, it means that this reset
4020          * request has already been handled, so just return here
4021          */
4022         if (!hdev->default_reset_request)
4023                 return;
4024
4025         dev_info(&hdev->pdev->dev,
4026                  "triggering reset in reset timer\n");
4027         hclge_reset_event(hdev->pdev, NULL);
4028 }
4029
4030 static void hclge_reset_subtask(struct hclge_dev *hdev)
4031 {
4032         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4033
4034         /* check if there is any ongoing reset in the hardware. This status can
4035          * be checked from reset_pending. If there is one, we need to wait for
4036          * the hardware to complete the reset.
4037          *    a. If we are able to figure out in a reasonable time that the
4038          *       hardware has fully reset, we can proceed with the driver and
4039          *       client reset.
4040          *    b. Else, we can come back later to check this status, so
4041          *       reschedule now.
4042          */
4043         hdev->last_reset_time = jiffies;
4044         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4045         if (hdev->reset_type != HNAE3_NONE_RESET)
4046                 hclge_reset(hdev);
4047
4048         /* check if we got any *new* reset requests to be honored */
4049         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4050         if (hdev->reset_type != HNAE3_NONE_RESET)
4051                 hclge_do_reset(hdev);
4052
4053         hdev->reset_type = HNAE3_NONE_RESET;
4054 }
4055
4056 static void hclge_reset_service_task(struct hclge_dev *hdev)
4057 {
4058         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4059                 return;
4060
4061         down(&hdev->reset_sem);
4062         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4063
4064         hclge_reset_subtask(hdev);
4065
4066         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4067         up(&hdev->reset_sem);
4068 }
4069
4070 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4071 {
4072         int i;
4073
4074         /* start from vport 1, since the PF is always alive */
4075         for (i = 1; i < hdev->num_alloc_vport; i++) {
4076                 struct hclge_vport *vport = &hdev->vport[i];
4077
4078                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4079                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4080
4081                 /* If the VF is not alive, set its mps to the default value */
4082                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4083                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4084         }
4085 }
4086
4087 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4088 {
4089         unsigned long delta = round_jiffies_relative(HZ);
4090
4091         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4092                 return;
4093
4094         /* Always handle the link update to make sure the link state is
4095          * updated when it is triggered by mbx.
4096          */
4097         hclge_update_link_status(hdev);
4098         hclge_sync_mac_table(hdev);
4099         hclge_sync_promisc_mode(hdev);
4100
4101         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4102                 delta = jiffies - hdev->last_serv_processed;
4103
4104                 if (delta < round_jiffies_relative(HZ)) {
4105                         delta = round_jiffies_relative(HZ) - delta;
4106                         goto out;
4107                 }
4108         }
4109
4110         hdev->serv_processed_cnt++;
4111         hclge_update_vport_alive(hdev);
4112
4113         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4114                 hdev->last_serv_processed = jiffies;
4115                 goto out;
4116         }
4117
4118         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4119                 hclge_update_stats_for_all(hdev);
4120
4121         hclge_update_port_info(hdev);
4122         hclge_sync_vlan_filter(hdev);
4123
4124         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4125                 hclge_rfs_filter_expire(hdev);
4126
4127         hdev->last_serv_processed = jiffies;
4128
4129 out:
4130         hclge_task_schedule(hdev, delta);
4131 }
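
     /* Illustrative sketch, not part of the driver: the throttling above aims to
      * run the heavy periodic work roughly once per second. The helper name is
      * hypothetical; it only restates the delta arithmetic used in
      * hclge_periodic_service_task() with round_jiffies_relative().
      */
     static unsigned long hclge_example_next_delay(unsigned long elapsed)
     {
             unsigned long period = round_jiffies_relative(HZ);

             /* e.g. with HZ == 1000 and elapsed == 300, the next pass is
              * rescheduled in roughly 700 jiffies; once a full period has
              * passed, the work runs immediately
              */
             return elapsed < period ? period - elapsed : 0;
     }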
4132
4133 static void hclge_service_task(struct work_struct *work)
4134 {
4135         struct hclge_dev *hdev =
4136                 container_of(work, struct hclge_dev, service_task.work);
4137
4138         hclge_reset_service_task(hdev);
4139         hclge_mailbox_service_task(hdev);
4140         hclge_periodic_service_task(hdev);
4141
4142         /* Handle reset and mbx again in case the periodic task delays the
4143          * handling by calling hclge_task_schedule() in
4144          * hclge_periodic_service_task().
4145          */
4146         hclge_reset_service_task(hdev);
4147         hclge_mailbox_service_task(hdev);
4148 }
4149
4150 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4151 {
4152         /* VF handle has no client */
4153         if (!handle->client)
4154                 return container_of(handle, struct hclge_vport, nic);
4155         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4156                 return container_of(handle, struct hclge_vport, roce);
4157         else
4158                 return container_of(handle, struct hclge_vport, nic);
4159 }
4160
4161 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4162                                   struct hnae3_vector_info *vector_info)
4163 {
4164 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4165
4166         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4167
4168         /* need an extended offset to config vectors >= 64 */
4169         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4170                 vector_info->io_addr = hdev->hw.io_base +
4171                                 HCLGE_VECTOR_REG_BASE +
4172                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4173         else
4174                 vector_info->io_addr = hdev->hw.io_base +
4175                                 HCLGE_VECTOR_EXT_REG_BASE +
4176                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4177                                 HCLGE_VECTOR_REG_OFFSET_H +
4178                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4179                                 HCLGE_VECTOR_REG_OFFSET;
4180
4181         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4182         hdev->vector_irq[idx] = vector_info->vector;
4183 }
4184
4185 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4186                             struct hnae3_vector_info *vector_info)
4187 {
4188         struct hclge_vport *vport = hclge_get_vport(handle);
4189         struct hnae3_vector_info *vector = vector_info;
4190         struct hclge_dev *hdev = vport->back;
4191         int alloc = 0;
4192         u16 i = 0;
4193         u16 j;
4194
4195         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4196         vector_num = min(hdev->num_msi_left, vector_num);
4197
4198         for (j = 0; j < vector_num; j++) {
4199                 while (++i < hdev->num_nic_msi) {
4200                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4201                                 hclge_get_vector_info(hdev, i, vector);
4202                                 vector++;
4203                                 alloc++;
4204
4205                                 break;
4206                         }
4207                 }
4208         }
4209         hdev->num_msi_left -= alloc;
4210         hdev->num_msi_used += alloc;
4211
4212         return alloc;
4213 }
4214
4215 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4216 {
4217         int i;
4218
4219         for (i = 0; i < hdev->num_msi; i++)
4220                 if (vector == hdev->vector_irq[i])
4221                         return i;
4222
4223         return -EINVAL;
4224 }
4225
4226 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4227 {
4228         struct hclge_vport *vport = hclge_get_vport(handle);
4229         struct hclge_dev *hdev = vport->back;
4230         int vector_id;
4231
4232         vector_id = hclge_get_vector_index(hdev, vector);
4233         if (vector_id < 0) {
4234                 dev_err(&hdev->pdev->dev,
4235                         "Get vector index fail. vector = %d\n", vector);
4236                 return vector_id;
4237         }
4238
4239         hclge_free_vector(hdev, vector_id);
4240
4241         return 0;
4242 }
4243
4244 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4245 {
4246         return HCLGE_RSS_KEY_SIZE;
4247 }
4248
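/* The hash key below is programmed in per-descriptor chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes; hash_config carries the hash algorithm in its
 * low bits and the chunk offset above HCLGE_RSS_HASH_KEY_OFFSET_B.
 * Illustrative sketch (assuming a 40-byte key and 16-byte chunks): the key is
 * sent as three commands covering bytes 0-15, 16-31 and 32-39.
 */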
4249 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4250                                   const u8 hfunc, const u8 *key)
4251 {
4252         struct hclge_rss_config_cmd *req;
4253         unsigned int key_offset = 0;
4254         struct hclge_desc desc;
4255         int key_counts;
4256         int key_size;
4257         int ret;
4258
4259         key_counts = HCLGE_RSS_KEY_SIZE;
4260         req = (struct hclge_rss_config_cmd *)desc.data;
4261
4262         while (key_counts) {
4263                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4264                                            false);
4265
4266                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4267                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4268
4269                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4270                 memcpy(req->hash_key,
4271                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4272
4273                 key_counts -= key_size;
4274                 key_offset++;
4275                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4276                 if (ret) {
4277                         dev_err(&hdev->pdev->dev,
4278                                 "Configure RSS config fail, status = %d\n",
4279                                 ret);
4280                         return ret;
4281                 }
4282         }
4283         return 0;
4284 }
4285
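/* Each indirection entry written below is split across two arrays: the low
 * 8 bits of the queue id go to rss_qid_l[j], and bit HCLGE_RSS_CFG_TBL_BW_L
 * of the queue id is packed into rss_qid_h at bit position
 * (j * HCLGE_RSS_CFG_TBL_BW_H) % BITS_PER_BYTE. For example (assuming
 * HCLGE_RSS_CFG_TBL_BW_L is 8), a queue id of 0x1a3 stores 0xa3 in
 * rss_qid_l[j] and sets the corresponding msb bit in rss_qid_h.
 */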
4286 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4287 {
4288         struct hclge_rss_indirection_table_cmd *req;
4289         struct hclge_desc desc;
4290         int rss_cfg_tbl_num;
4291         u8 rss_msb_oft;
4292         u8 rss_msb_val;
4293         int ret;
4294         u16 qid;
4295         int i;
4296         u32 j;
4297
4298         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4299         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4300                           HCLGE_RSS_CFG_TBL_SIZE;
4301
4302         for (i = 0; i < rss_cfg_tbl_num; i++) {
4303                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
4304                                            false);
4305
4306                 req->start_table_index =
4307                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4308                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4309                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4310                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4311                         req->rss_qid_l[j] = qid & 0xff;
4312                         rss_msb_oft =
4313                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4314                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4315                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4316                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4317                 }
4318                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4319                 if (ret) {
4320                         dev_err(&hdev->pdev->dev,
4321                                 "Configure rss indir table fail,status = %d\n",
4322                                 ret);
4323                         return ret;
4324                 }
4325         }
4326         return 0;
4327 }
4328
4329 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4330                                  u16 *tc_size, u16 *tc_offset)
4331 {
4332         struct hclge_rss_tc_mode_cmd *req;
4333         struct hclge_desc desc;
4334         int ret;
4335         int i;
4336
4337         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4338         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4339
4340         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4341                 u16 mode = 0;
4342
4343                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4344                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4345                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4346                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4347                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4348                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4349                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4350
4351                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4352         }
4353
4354         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4355         if (ret)
4356                 dev_err(&hdev->pdev->dev,
4357                         "Configure rss tc mode fail, status = %d\n", ret);
4358
4359         return ret;
4360 }
4361
4362 static void hclge_get_rss_type(struct hclge_vport *vport)
4363 {
4364         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4365             vport->rss_tuple_sets.ipv4_udp_en ||
4366             vport->rss_tuple_sets.ipv4_sctp_en ||
4367             vport->rss_tuple_sets.ipv6_tcp_en ||
4368             vport->rss_tuple_sets.ipv6_udp_en ||
4369             vport->rss_tuple_sets.ipv6_sctp_en)
4370                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4371         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4372                  vport->rss_tuple_sets.ipv6_fragment_en)
4373                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4374         else
4375                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4376 }
4377
4378 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4379 {
4380         struct hclge_rss_input_tuple_cmd *req;
4381         struct hclge_desc desc;
4382         int ret;
4383
4384         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4385
4386         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4387
4388         /* Get the tuple cfg from pf */
4389         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4390         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4391         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4392         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4393         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4394         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4395         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4396         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4397         hclge_get_rss_type(&hdev->vport[0]);
4398         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4399         if (ret)
4400                 dev_err(&hdev->pdev->dev,
4401                         "Configure rss input fail, status = %d\n", ret);
4402         return ret;
4403 }
4404
4405 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4406                          u8 *key, u8 *hfunc)
4407 {
4408         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4409         struct hclge_vport *vport = hclge_get_vport(handle);
4410         int i;
4411
4412         /* Get hash algorithm */
4413         if (hfunc) {
4414                 switch (vport->rss_algo) {
4415                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4416                         *hfunc = ETH_RSS_HASH_TOP;
4417                         break;
4418                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4419                         *hfunc = ETH_RSS_HASH_XOR;
4420                         break;
4421                 default:
4422                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4423                         break;
4424                 }
4425         }
4426
4427         /* Get the RSS Key required by the user */
4428         if (key)
4429                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4430
4431         /* Get indirection table */
4432         if (indir)
4433                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4434                         indir[i] = vport->rss_indirection_tbl[i];
4435
4436         return 0;
4437 }
4438
4439 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4440                          const u8 *key, const u8 hfunc)
4441 {
4442         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4443         struct hclge_vport *vport = hclge_get_vport(handle);
4444         struct hclge_dev *hdev = vport->back;
4445         u8 hash_algo;
4446         int ret, i;
4447
4448         /* Set the RSS Hash Key if specified by the user */
4449         if (key) {
4450                 switch (hfunc) {
4451                 case ETH_RSS_HASH_TOP:
4452                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4453                         break;
4454                 case ETH_RSS_HASH_XOR:
4455                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4456                         break;
4457                 case ETH_RSS_HASH_NO_CHANGE:
4458                         hash_algo = vport->rss_algo;
4459                         break;
4460                 default:
4461                         return -EINVAL;
4462                 }
4463
4464                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4465                 if (ret)
4466                         return ret;
4467
4468                 /* Update the shadow RSS key with the user specified key */
4469                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4470                 vport->rss_algo = hash_algo;
4471         }
4472
4473         /* Update the shadow RSS table with user specified qids */
4474         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4475                 vport->rss_indirection_tbl[i] = indir[i];
4476
4477         /* Update the hardware */
4478         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4479 }
4480
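/* Map the ethtool RXH_* flags onto the hardware hash-tuple bits. For example,
 * nfc->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) yields
 * HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT, and
 * SCTP flow types additionally set HCLGE_V_TAG_BIT.
 */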
4481 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4482 {
4483         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4484
4485         if (nfc->data & RXH_L4_B_2_3)
4486                 hash_sets |= HCLGE_D_PORT_BIT;
4487         else
4488                 hash_sets &= ~HCLGE_D_PORT_BIT;
4489
4490         if (nfc->data & RXH_IP_SRC)
4491                 hash_sets |= HCLGE_S_IP_BIT;
4492         else
4493                 hash_sets &= ~HCLGE_S_IP_BIT;
4494
4495         if (nfc->data & RXH_IP_DST)
4496                 hash_sets |= HCLGE_D_IP_BIT;
4497         else
4498                 hash_sets &= ~HCLGE_D_IP_BIT;
4499
4500         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4501                 hash_sets |= HCLGE_V_TAG_BIT;
4502
4503         return hash_sets;
4504 }
4505
4506 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4507                                     struct ethtool_rxnfc *nfc,
4508                                     struct hclge_rss_input_tuple_cmd *req)
4509 {
4510         struct hclge_dev *hdev = vport->back;
4511         u8 tuple_sets;
4512
4513         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4514         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4515         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4516         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4517         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4518         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4519         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4520         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4521
4522         tuple_sets = hclge_get_rss_hash_bits(nfc);
4523         switch (nfc->flow_type) {
4524         case TCP_V4_FLOW:
4525                 req->ipv4_tcp_en = tuple_sets;
4526                 break;
4527         case TCP_V6_FLOW:
4528                 req->ipv6_tcp_en = tuple_sets;
4529                 break;
4530         case UDP_V4_FLOW:
4531                 req->ipv4_udp_en = tuple_sets;
4532                 break;
4533         case UDP_V6_FLOW:
4534                 req->ipv6_udp_en = tuple_sets;
4535                 break;
4536         case SCTP_V4_FLOW:
4537                 req->ipv4_sctp_en = tuple_sets;
4538                 break;
4539         case SCTP_V6_FLOW:
4540                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4541                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4542                         return -EINVAL;
4543
4544                 req->ipv6_sctp_en = tuple_sets;
4545                 break;
4546         case IPV4_FLOW:
4547                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4548                 break;
4549         case IPV6_FLOW:
4550                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4551                 break;
4552         default:
4553                 return -EINVAL;
4554         }
4555
4556         return 0;
4557 }
4558
4559 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4560                                struct ethtool_rxnfc *nfc)
4561 {
4562         struct hclge_vport *vport = hclge_get_vport(handle);
4563         struct hclge_dev *hdev = vport->back;
4564         struct hclge_rss_input_tuple_cmd *req;
4565         struct hclge_desc desc;
4566         int ret;
4567
4568         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4569                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4570                 return -EINVAL;
4571
4572         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4573         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4574
4575         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4576         if (ret) {
4577                 dev_err(&hdev->pdev->dev,
4578                         "failed to init rss tuple cmd, ret = %d\n", ret);
4579                 return ret;
4580         }
4581
4582         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4583         if (ret) {
4584                 dev_err(&hdev->pdev->dev,
4585                         "Set rss tuple fail, status = %d\n", ret);
4586                 return ret;
4587         }
4588
4589         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4590         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4591         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4592         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4593         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4594         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4595         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4596         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4597         hclge_get_rss_type(vport);
4598         return 0;
4599 }
4600
4601 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4602                                      u8 *tuple_sets)
4603 {
4604         switch (flow_type) {
4605         case TCP_V4_FLOW:
4606                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4607                 break;
4608         case UDP_V4_FLOW:
4609                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4610                 break;
4611         case TCP_V6_FLOW:
4612                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4613                 break;
4614         case UDP_V6_FLOW:
4615                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4616                 break;
4617         case SCTP_V4_FLOW:
4618                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4619                 break;
4620         case SCTP_V6_FLOW:
4621                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4622                 break;
4623         case IPV4_FLOW:
4624         case IPV6_FLOW:
4625                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4626                 break;
4627         default:
4628                 return -EINVAL;
4629         }
4630
4631         return 0;
4632 }
4633
4634 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4635 {
4636         u64 tuple_data = 0;
4637
4638         if (tuple_sets & HCLGE_D_PORT_BIT)
4639                 tuple_data |= RXH_L4_B_2_3;
4640         if (tuple_sets & HCLGE_S_PORT_BIT)
4641                 tuple_data |= RXH_L4_B_0_1;
4642         if (tuple_sets & HCLGE_D_IP_BIT)
4643                 tuple_data |= RXH_IP_DST;
4644         if (tuple_sets & HCLGE_S_IP_BIT)
4645                 tuple_data |= RXH_IP_SRC;
4646
4647         return tuple_data;
4648 }
4649
4650 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4651                                struct ethtool_rxnfc *nfc)
4652 {
4653         struct hclge_vport *vport = hclge_get_vport(handle);
4654         u8 tuple_sets;
4655         int ret;
4656
4657         nfc->data = 0;
4658
4659         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4660         if (ret || !tuple_sets)
4661                 return ret;
4662
4663         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4664
4665         return 0;
4666 }
4667
4668 static int hclge_get_tc_size(struct hnae3_handle *handle)
4669 {
4670         struct hclge_vport *vport = hclge_get_vport(handle);
4671         struct hclge_dev *hdev = vport->back;
4672
4673         return hdev->pf_rss_size_max;
4674 }
4675
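/* tc_size written to hardware is the log2 of rss_size rounded up to a power
 * of two. For example, a TC with rss_size 6 is rounded up to 8 and programmed
 * as tc_size 3, while the queues actually used remain bounded by the
 * indirection table contents.
 */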
4676 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4677 {
4678         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4679         struct hclge_vport *vport = hdev->vport;
4680         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4681         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4682         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4683         struct hnae3_tc_info *tc_info;
4684         u16 roundup_size;
4685         u16 rss_size;
4686         int i;
4687
4688         tc_info = &vport->nic.kinfo.tc_info;
4689         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4690                 rss_size = tc_info->tqp_count[i];
4691                 tc_valid[i] = 0;
4692
4693                 if (!(hdev->hw_tc_map & BIT(i)))
4694                         continue;
4695
4696                 /* tc_size set to hardware is the log2 of the roundup power of two
4697                  * of rss_size; the actual queue size is limited by the indirection
4698                  * table.
4699                  */
4700                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4701                     rss_size == 0) {
4702                         dev_err(&hdev->pdev->dev,
4703                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4704                                 rss_size);
4705                         return -EINVAL;
4706                 }
4707
4708                 roundup_size = roundup_pow_of_two(rss_size);
4709                 roundup_size = ilog2(roundup_size);
4710
4711                 tc_valid[i] = 1;
4712                 tc_size[i] = roundup_size;
4713                 tc_offset[i] = tc_info->tqp_offset[i];
4714         }
4715
4716         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4717 }
4718
4719 int hclge_rss_init_hw(struct hclge_dev *hdev)
4720 {
4721         struct hclge_vport *vport = hdev->vport;
4722         u16 *rss_indir = vport[0].rss_indirection_tbl;
4723         u8 *key = vport[0].rss_hash_key;
4724         u8 hfunc = vport[0].rss_algo;
4725         int ret;
4726
4727         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4728         if (ret)
4729                 return ret;
4730
4731         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4732         if (ret)
4733                 return ret;
4734
4735         ret = hclge_set_rss_input_tuple(hdev);
4736         if (ret)
4737                 return ret;
4738
4739         return hclge_init_rss_tc_mode(hdev);
4740 }
4741
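/* Fill each vport's indirection table with the default round-robin mapping
 * indir[i] = i % alloc_rss_size. For example, with alloc_rss_size == 4 the
 * table repeats 0, 1, 2, 3, 0, 1, ...
 */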
4742 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4743 {
4744         struct hclge_vport *vport = hdev->vport;
4745         int i, j;
4746
4747         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4748                 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4749                         vport[j].rss_indirection_tbl[i] =
4750                                 i % vport[j].alloc_rss_size;
4751         }
4752 }
4753
4754 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4755 {
4756         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4757         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4758         struct hclge_vport *vport = hdev->vport;
4759
4760         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4761                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4762
4763         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4764                 u16 *rss_ind_tbl;
4765
4766                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4767                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4768                 vport[i].rss_tuple_sets.ipv4_udp_en =
4769                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4770                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4771                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4772                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4773                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4774                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4775                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4776                 vport[i].rss_tuple_sets.ipv6_udp_en =
4777                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4778                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4779                         hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4780                         HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4781                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4782                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4783                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4784
4785                 vport[i].rss_algo = rss_algo;
4786
4787                 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4788                                            sizeof(*rss_ind_tbl), GFP_KERNEL);
4789                 if (!rss_ind_tbl)
4790                         return -ENOMEM;
4791
4792                 vport[i].rss_indirection_tbl = rss_ind_tbl;
4793                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4794                        HCLGE_RSS_KEY_SIZE);
4795         }
4796
4797         hclge_rss_indir_init_cfg(hdev);
4798
4799         return 0;
4800 }
4801
4802 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4803                                 int vector_id, bool en,
4804                                 struct hnae3_ring_chain_node *ring_chain)
4805 {
4806         struct hclge_dev *hdev = vport->back;
4807         struct hnae3_ring_chain_node *node;
4808         struct hclge_desc desc;
4809         struct hclge_ctrl_vector_chain_cmd *req =
4810                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4811         enum hclge_cmd_status status;
4812         enum hclge_opcode_type op;
4813         u16 tqp_type_and_id;
4814         int i;
4815
4816         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4817         hclge_cmd_setup_basic_desc(&desc, op, false);
4818         req->int_vector_id_l = hnae3_get_field(vector_id,
4819                                                HCLGE_VECTOR_ID_L_M,
4820                                                HCLGE_VECTOR_ID_L_S);
4821         req->int_vector_id_h = hnae3_get_field(vector_id,
4822                                                HCLGE_VECTOR_ID_H_M,
4823                                                HCLGE_VECTOR_ID_H_S);
4824
4825         i = 0;
4826         for (node = ring_chain; node; node = node->next) {
4827                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4828                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4829                                 HCLGE_INT_TYPE_S,
4830                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4831                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4832                                 HCLGE_TQP_ID_S, node->tqp_index);
4833                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4834                                 HCLGE_INT_GL_IDX_S,
4835                                 hnae3_get_field(node->int_gl_idx,
4836                                                 HNAE3_RING_GL_IDX_M,
4837                                                 HNAE3_RING_GL_IDX_S));
4838                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4839                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4840                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4841                         req->vfid = vport->vport_id;
4842
4843                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4844                         if (status) {
4845                                 dev_err(&hdev->pdev->dev,
4846                                         "Map TQP fail, status is %d.\n",
4847                                         status);
4848                                 return -EIO;
4849                         }
4850                         i = 0;
4851
4852                         hclge_cmd_setup_basic_desc(&desc,
4853                                                    op,
4854                                                    false);
4855                         req->int_vector_id_l =
4856                                 hnae3_get_field(vector_id,
4857                                                 HCLGE_VECTOR_ID_L_M,
4858                                                 HCLGE_VECTOR_ID_L_S);
4859                         req->int_vector_id_h =
4860                                 hnae3_get_field(vector_id,
4861                                                 HCLGE_VECTOR_ID_H_M,
4862                                                 HCLGE_VECTOR_ID_H_S);
4863                 }
4864         }
4865
4866         if (i > 0) {
4867                 req->int_cause_num = i;
4868                 req->vfid = vport->vport_id;
4869                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4870                 if (status) {
4871                         dev_err(&hdev->pdev->dev,
4872                                 "Map TQP fail, status is %d.\n", status);
4873                         return -EIO;
4874                 }
4875         }
4876
4877         return 0;
4878 }
4879
4880 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4881                                     struct hnae3_ring_chain_node *ring_chain)
4882 {
4883         struct hclge_vport *vport = hclge_get_vport(handle);
4884         struct hclge_dev *hdev = vport->back;
4885         int vector_id;
4886
4887         vector_id = hclge_get_vector_index(hdev, vector);
4888         if (vector_id < 0) {
4889                 dev_err(&hdev->pdev->dev,
4890                         "failed to get vector index. vector=%d\n", vector);
4891                 return vector_id;
4892         }
4893
4894         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4895 }
4896
4897 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4898                                        struct hnae3_ring_chain_node *ring_chain)
4899 {
4900         struct hclge_vport *vport = hclge_get_vport(handle);
4901         struct hclge_dev *hdev = vport->back;
4902         int vector_id, ret;
4903
4904         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4905                 return 0;
4906
4907         vector_id = hclge_get_vector_index(hdev, vector);
4908         if (vector_id < 0) {
4909                 dev_err(&handle->pdev->dev,
4910                         "Get vector index fail. ret =%d\n", vector_id);
4911                 return vector_id;
4912         }
4913
4914         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4915         if (ret)
4916                 dev_err(&handle->pdev->dev,
4917                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4918                         vector_id, ret);
4919
4920         return ret;
4921 }
4922
4923 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4924                                       bool en_uc, bool en_mc, bool en_bc)
4925 {
4926         struct hclge_vport *vport = &hdev->vport[vf_id];
4927         struct hnae3_handle *handle = &vport->nic;
4928         struct hclge_promisc_cfg_cmd *req;
4929         struct hclge_desc desc;
4930         bool uc_tx_en = en_uc;
4931         u8 promisc_cfg = 0;
4932         int ret;
4933
4934         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4935
4936         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4937         req->vf_id = vf_id;
4938
4939         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4940                 uc_tx_en = false;
4941
4942         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4943         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4944         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4945         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4946         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4947         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4948         req->extend_promisc = promisc_cfg;
4949
4950         /* to be compatible with DEVICE_VERSION_V1/2 */
4951         promisc_cfg = 0;
4952         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4953         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4954         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4955         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4956         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4957         req->promisc = promisc_cfg;
4958
4959         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4960         if (ret)
4961                 dev_err(&hdev->pdev->dev,
4962                         "failed to set vport %u promisc mode, ret = %d.\n",
4963                         vf_id, ret);
4964
4965         return ret;
4966 }
4967
4968 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4969                                  bool en_mc_pmc, bool en_bc_pmc)
4970 {
4971         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4972                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
4973 }
4974
4975 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4976                                   bool en_mc_pmc)
4977 {
4978         struct hclge_vport *vport = hclge_get_vport(handle);
4979         struct hclge_dev *hdev = vport->back;
4980         bool en_bc_pmc = true;
4981
4982         /* For devices whose version is below V2, the vlan filter is always
4983          * bypassed when broadcast promisc is enabled. So broadcast promisc
4984          * should be disabled until the user enables promisc mode.
4985          */
4986         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4987                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4988
4989         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4990                                             en_bc_pmc);
4991 }
4992
4993 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4994 {
4995         struct hclge_vport *vport = hclge_get_vport(handle);
4996         struct hclge_dev *hdev = vport->back;
4997
4998         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4999 }
5000
5001 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5002 {
5003         struct hclge_get_fd_mode_cmd *req;
5004         struct hclge_desc desc;
5005         int ret;
5006
5007         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5008
5009         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5010
5011         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5012         if (ret) {
5013                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5014                 return ret;
5015         }
5016
5017         *fd_mode = req->mode;
5018
5019         return ret;
5020 }
5021
5022 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5023                                    u32 *stage1_entry_num,
5024                                    u32 *stage2_entry_num,
5025                                    u16 *stage1_counter_num,
5026                                    u16 *stage2_counter_num)
5027 {
5028         struct hclge_get_fd_allocation_cmd *req;
5029         struct hclge_desc desc;
5030         int ret;
5031
5032         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5033
5034         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5035
5036         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5037         if (ret) {
5038                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5039                         ret);
5040                 return ret;
5041         }
5042
5043         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5044         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5045         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5046         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5047
5048         return ret;
5049 }
5050
5051 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5052                                    enum HCLGE_FD_STAGE stage_num)
5053 {
5054         struct hclge_set_fd_key_config_cmd *req;
5055         struct hclge_fd_key_cfg *stage;
5056         struct hclge_desc desc;
5057         int ret;
5058
5059         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5060
5061         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5062         stage = &hdev->fd_cfg.key_cfg[stage_num];
5063         req->stage = stage_num;
5064         req->key_select = stage->key_sel;
5065         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5066         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5067         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5068         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5069         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5070         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5071
5072         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5073         if (ret)
5074                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5075
5076         return ret;
5077 }
5078
5079 static int hclge_init_fd_config(struct hclge_dev *hdev)
5080 {
5081 #define LOW_2_WORDS             0x03
5082         struct hclge_fd_key_cfg *key_cfg;
5083         int ret;
5084
5085         if (!hnae3_dev_fd_supported(hdev))
5086                 return 0;
5087
5088         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5089         if (ret)
5090                 return ret;
5091
5092         switch (hdev->fd_cfg.fd_mode) {
5093         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5094                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5095                 break;
5096         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5097                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5098                 break;
5099         default:
5100                 dev_err(&hdev->pdev->dev,
5101                         "Unsupported flow director mode %u\n",
5102                         hdev->fd_cfg.fd_mode);
5103                 return -EOPNOTSUPP;
5104         }
5105
5106         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5107         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5108         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5109         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5110         key_cfg->outer_sipv6_word_en = 0;
5111         key_cfg->outer_dipv6_word_en = 0;
5112
5113         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5114                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5115                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5116                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5117
5118         /* When using the max 400-bit key, we can also support the MAC tuples */
5119         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5120                 key_cfg->tuple_active |=
5121                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5122
5123         /* roce_type is used to filter roce frames
5124          * dst_vport is used to specify the rule
5125          */
5126         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5127
5128         ret = hclge_get_fd_allocation(hdev,
5129                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5130                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5131                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5132                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5133         if (ret)
5134                 return ret;
5135
5136         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5137 }
5138
5139 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5140                                 int loc, u8 *key, bool is_add)
5141 {
5142         struct hclge_fd_tcam_config_1_cmd *req1;
5143         struct hclge_fd_tcam_config_2_cmd *req2;
5144         struct hclge_fd_tcam_config_3_cmd *req3;
5145         struct hclge_desc desc[3];
5146         int ret;
5147
5148         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5149         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5150         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5151         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5152         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5153
5154         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5155         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5156         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5157
5158         req1->stage = stage;
5159         req1->xy_sel = sel_x ? 1 : 0;
5160         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5161         req1->index = cpu_to_le32(loc);
5162         req1->entry_vld = sel_x ? is_add : 0;
5163
5164         if (key) {
5165                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5166                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5167                        sizeof(req2->tcam_data));
5168                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5169                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5170         }
5171
5172         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5173         if (ret)
5174                 dev_err(&hdev->pdev->dev,
5175                         "config tcam key fail, ret=%d\n",
5176                         ret);
5177
5178         return ret;
5179 }
5180
5181 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5182                               struct hclge_fd_ad_data *action)
5183 {
5184         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5185         struct hclge_fd_ad_config_cmd *req;
5186         struct hclge_desc desc;
5187         u64 ad_data = 0;
5188         int ret;
5189
5190         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5191
5192         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5193         req->index = cpu_to_le32(loc);
5194         req->stage = stage;
5195
5196         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5197                       action->write_rule_id_to_bd);
5198         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5199                         action->rule_id);
5200         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5201                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5202                               action->override_tc);
5203                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5204                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5205         }
5206         ad_data <<= 32;
5207         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5208         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5209                       action->forward_to_direct_queue);
5210         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5211                         action->queue_id);
5212         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5213         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5214                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5215         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5216         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5217                         action->counter_id);
5218
5219         req->ad_data = cpu_to_le64(ad_data);
5220         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5221         if (ret)
5222                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5223
5224         return ret;
5225 }
5226
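/* Convert one tuple of the rule into the TCAM X/Y key format. The calc_x()
 * and calc_y() helpers defined earlier in this file derive the two TCAM
 * halves from the tuple value and its mask; as a sketch of the usual TCAM
 * X/Y convention, bits cleared in the mask end up as don't-care in the
 * entry (see the calc_x()/calc_y() definitions for the exact encoding).
 */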
5227 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5228                                    struct hclge_fd_rule *rule)
5229 {
5230         u16 tmp_x_s, tmp_y_s;
5231         u32 tmp_x_l, tmp_y_l;
5232         int i;
5233
5234         if (rule->unused_tuple & tuple_bit)
5235                 return true;
5236
5237         switch (tuple_bit) {
5238         case BIT(INNER_DST_MAC):
5239                 for (i = 0; i < ETH_ALEN; i++) {
5240                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5241                                rule->tuples_mask.dst_mac[i]);
5242                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5243                                rule->tuples_mask.dst_mac[i]);
5244                 }
5245
5246                 return true;
5247         case BIT(INNER_SRC_MAC):
5248                 for (i = 0; i < ETH_ALEN; i++) {
5249                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5250                                rule->tuples_mask.src_mac[i]);
5251                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5252                                rule->tuples_mask.src_mac[i]);
5253                 }
5254
5255                 return true;
5256         case BIT(INNER_VLAN_TAG_FST):
5257                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5258                        rule->tuples_mask.vlan_tag1);
5259                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5260                        rule->tuples_mask.vlan_tag1);
5261                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5262                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5263
5264                 return true;
5265         case BIT(INNER_ETH_TYPE):
5266                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5267                        rule->tuples_mask.ether_proto);
5268                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5269                        rule->tuples_mask.ether_proto);
5270                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5271                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5272
5273                 return true;
5274         case BIT(INNER_IP_TOS):
5275                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5276                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5277
5278                 return true;
5279         case BIT(INNER_IP_PROTO):
5280                 calc_x(*key_x, rule->tuples.ip_proto,
5281                        rule->tuples_mask.ip_proto);
5282                 calc_y(*key_y, rule->tuples.ip_proto,
5283                        rule->tuples_mask.ip_proto);
5284
5285                 return true;
5286         case BIT(INNER_SRC_IP):
5287                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5288                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5289                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5290                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5291                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5292                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5293
5294                 return true;
5295         case BIT(INNER_DST_IP):
5296                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5297                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5298                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5299                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5300                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5301                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5302
5303                 return true;
5304         case BIT(INNER_SRC_PORT):
5305                 calc_x(tmp_x_s, rule->tuples.src_port,
5306                        rule->tuples_mask.src_port);
5307                 calc_y(tmp_y_s, rule->tuples.src_port,
5308                        rule->tuples_mask.src_port);
5309                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5310                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5311
5312                 return true;
5313         case BIT(INNER_DST_PORT):
5314                 calc_x(tmp_x_s, rule->tuples.dst_port,
5315                        rule->tuples_mask.dst_port);
5316                 calc_y(tmp_y_s, rule->tuples.dst_port,
5317                        rule->tuples_mask.dst_port);
5318                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5319                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5320
5321                 return true;
5322         default:
5323                 return false;
5324         }
5325 }
5326
5327 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5328                                  u8 vf_id, u8 network_port_id)
5329 {
5330         u32 port_number = 0;
5331
5332         if (port_type == HOST_PORT) {
5333                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5334                                 pf_id);
5335                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5336                                 vf_id);
5337                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5338         } else {
5339                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5340                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5341                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5342         }
5343
5344         return port_number;
5345 }
5346
5347 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5348                                        __le32 *key_x, __le32 *key_y,
5349                                        struct hclge_fd_rule *rule)
5350 {
5351         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5352         u8 cur_pos = 0, tuple_size, shift_bits;
5353         unsigned int i;
5354
5355         for (i = 0; i < MAX_META_DATA; i++) {
5356                 tuple_size = meta_data_key_info[i].key_length;
5357                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5358
5359                 switch (tuple_bit) {
5360                 case BIT(ROCE_TYPE):
5361                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5362                         cur_pos += tuple_size;
5363                         break;
5364                 case BIT(DST_VPORT):
5365                         port_number = hclge_get_port_number(HOST_PORT, 0,
5366                                                             rule->vf_id, 0);
5367                         hnae3_set_field(meta_data,
5368                                         GENMASK(cur_pos + tuple_size, cur_pos),
5369                                         cur_pos, port_number);
5370                         cur_pos += tuple_size;
5371                         break;
5372                 default:
5373                         break;
5374                 }
5375         }
5376
5377         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5378         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5379         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5380
5381         *key_x = cpu_to_le32(tmp_x << shift_bits);
5382         *key_y = cpu_to_le32(tmp_y << shift_bits);
5383 }
5384
5385 /* A complete key consists of a meta data key and a tuple key.
5386  * The meta data key is stored at the MSB region, the tuple key is stored at
5387  * the LSB region, and unused bits are filled with 0.
5388  */
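/* Illustrative layout (assuming, for example, a 400-bit max key and
 * MAX_META_DATA_LENGTH bits of meta data): meta_data_region is computed as
 * max_key_length / 8 - MAX_META_DATA_LENGTH / 8, so the meta data key
 * occupies the top bytes of key_x/key_y while the tuple key grows upward
 * from byte 0.
 */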
5389 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5390                             struct hclge_fd_rule *rule)
5391 {
5392         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5393         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5394         u8 *cur_key_x, *cur_key_y;
5395         u8 meta_data_region;
5396         u8 tuple_size;
5397         int ret;
5398         u32 i;
5399
5400         memset(key_x, 0, sizeof(key_x));
5401         memset(key_y, 0, sizeof(key_y));
5402         cur_key_x = key_x;
5403         cur_key_y = key_y;
5404
5405         for (i = 0; i < MAX_TUPLE; i++) {
5406                 bool tuple_valid;
5407                 u32 check_tuple;
5408
5409                 tuple_size = tuple_key_info[i].key_length / 8;
5410                 check_tuple = key_cfg->tuple_active & BIT(i);
5411
5412                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5413                                                      cur_key_y, rule);
5414                 if (tuple_valid) {
5415                         cur_key_x += tuple_size;
5416                         cur_key_y += tuple_size;
5417                 }
5418         }
5419
5420         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5421                         MAX_META_DATA_LENGTH / 8;
5422
5423         hclge_fd_convert_meta_data(key_cfg,
5424                                    (__le32 *)(key_x + meta_data_region),
5425                                    (__le32 *)(key_y + meta_data_region),
5426                                    rule);
5427
5428         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5429                                    true);
5430         if (ret) {
5431                 dev_err(&hdev->pdev->dev,
5432                         "fd key_y config fail, loc=%u, ret=%d\n",
5433                         rule->location, ret);
5434                 return ret;
5435         }
5436
5437         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5438                                    true);
5439         if (ret)
5440                 dev_err(&hdev->pdev->dev,
5441                         "fd key_x config fail, loc=%u, ret=%d\n",
5442                         rule->location, ret);
5443         return ret;
5444 }
5445
5446 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5447                                struct hclge_fd_rule *rule)
5448 {
5449         struct hclge_vport *vport = hdev->vport;
5450         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5451         struct hclge_fd_ad_data ad_data;
5452
5453         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5454         ad_data.ad_id = rule->location;
5455
5456         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5457                 ad_data.drop_packet = true;
5458         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5459                 ad_data.override_tc = true;
5460                 ad_data.queue_id =
5461                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5462                 ad_data.tc_size =
5463                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5464         } else {
5465                 ad_data.forward_to_direct_queue = true;
5466                 ad_data.queue_id = rule->queue_id;
5467         }
5468
5469         ad_data.use_counter = false;
5470         ad_data.counter_id = 0;
5471
5472         ad_data.use_next_stage = false;
5473         ad_data.next_input_key = 0;
5474
5475         ad_data.write_rule_id_to_bd = true;
5476         ad_data.rule_id = rule->location;
5477
5478         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5479 }
5480
5481 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5482                                        u32 *unused_tuple)
5483 {
5484         if (!spec || !unused_tuple)
5485                 return -EINVAL;
5486
5487         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5488
5489         if (!spec->ip4src)
5490                 *unused_tuple |= BIT(INNER_SRC_IP);
5491
5492         if (!spec->ip4dst)
5493                 *unused_tuple |= BIT(INNER_DST_IP);
5494
5495         if (!spec->psrc)
5496                 *unused_tuple |= BIT(INNER_SRC_PORT);
5497
5498         if (!spec->pdst)
5499                 *unused_tuple |= BIT(INNER_DST_PORT);
5500
5501         if (!spec->tos)
5502                 *unused_tuple |= BIT(INNER_IP_TOS);
5503
5504         return 0;
5505 }
5506
5507 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5508                                     u32 *unused_tuple)
5509 {
5510         if (!spec || !unused_tuple)
5511                 return -EINVAL;
5512
5513         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5514                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5515
5516         if (!spec->ip4src)
5517                 *unused_tuple |= BIT(INNER_SRC_IP);
5518
5519         if (!spec->ip4dst)
5520                 *unused_tuple |= BIT(INNER_DST_IP);
5521
5522         if (!spec->tos)
5523                 *unused_tuple |= BIT(INNER_IP_TOS);
5524
5525         if (!spec->proto)
5526                 *unused_tuple |= BIT(INNER_IP_PROTO);
5527
5528         if (spec->l4_4_bytes)
5529                 return -EOPNOTSUPP;
5530
5531         if (spec->ip_ver != ETH_RX_NFC_IP4)
5532                 return -EOPNOTSUPP;
5533
5534         return 0;
5535 }
5536
5537 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5538                                        u32 *unused_tuple)
5539 {
5540         if (!spec || !unused_tuple)
5541                 return -EINVAL;
5542
5543         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5544                 BIT(INNER_IP_TOS);
5545
5546         /* check whether src/dst ip address is used */
5547         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5548                 *unused_tuple |= BIT(INNER_SRC_IP);
5549
5550         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5551                 *unused_tuple |= BIT(INNER_DST_IP);
5552
5553         if (!spec->psrc)
5554                 *unused_tuple |= BIT(INNER_SRC_PORT);
5555
5556         if (!spec->pdst)
5557                 *unused_tuple |= BIT(INNER_DST_PORT);
5558
5559         if (spec->tclass)
5560                 return -EOPNOTSUPP;
5561
5562         return 0;
5563 }
5564
5565 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5566                                     u32 *unused_tuple)
5567 {
5568         if (!spec || !unused_tuple)
5569                 return -EINVAL;
5570
5571         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5572                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5573
5574         /* check whether src/dst ip address is used */
5575         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5576                 *unused_tuple |= BIT(INNER_SRC_IP);
5577
5578         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5579                 *unused_tuple |= BIT(INNER_DST_IP);
5580
5581         if (!spec->l4_proto)
5582                 *unused_tuple |= BIT(INNER_IP_PROTO);
5583
5584         if (spec->tclass)
5585                 return -EOPNOTSUPP;
5586
5587         if (spec->l4_4_bytes)
5588                 return -EOPNOTSUPP;
5589
5590         return 0;
5591 }
5592
5593 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5594 {
5595         if (!spec || !unused_tuple)
5596                 return -EINVAL;
5597
5598         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5599                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5600                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5601
5602         if (is_zero_ether_addr(spec->h_source))
5603                 *unused_tuple |= BIT(INNER_SRC_MAC);
5604
5605         if (is_zero_ether_addr(spec->h_dest))
5606                 *unused_tuple |= BIT(INNER_DST_MAC);
5607
5608         if (!spec->h_proto)
5609                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5610
5611         return 0;
5612 }
5613
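/* Validate the FLOW_EXT (vlan) and FLOW_MAC_EXT (dest mac) parts of the
 * spec and update @unused_tuple accordingly.
 */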
5614 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5615                                     struct ethtool_rx_flow_spec *fs,
5616                                     u32 *unused_tuple)
5617 {
5618         if (fs->flow_type & FLOW_EXT) {
5619                 if (fs->h_ext.vlan_etype) {
5620                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5621                         return -EOPNOTSUPP;
5622                 }
5623
5624                 if (!fs->h_ext.vlan_tci)
5625                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5626
5627                 if (fs->m_ext.vlan_tci &&
5628                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5629                         dev_err(&hdev->pdev->dev,
5630                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5631                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5632                         return -EINVAL;
5633                 }
5634         } else {
5635                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5636         }
5637
5638         if (fs->flow_type & FLOW_MAC_EXT) {
5639                 if (hdev->fd_cfg.fd_mode !=
5640                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5641                         dev_err(&hdev->pdev->dev,
5642                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5643                         return -EOPNOTSUPP;
5644                 }
5645
5646                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5647                         *unused_tuple |= BIT(INNER_DST_MAC);
5648                 else
5649                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5650         }
5651
5652         return 0;
5653 }
5654
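/* Validate the whole ethtool flow spec: the rule location, the user-def
 * bytes, the per-flow-type tuple and the extended tuple.
 */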
5655 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5656                                struct ethtool_rx_flow_spec *fs,
5657                                u32 *unused_tuple)
5658 {
5659         u32 flow_type;
5660         int ret;
5661
5662         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5663                 dev_err(&hdev->pdev->dev,
5664                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5665                         fs->location,
5666                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5667                 return -EINVAL;
5668         }
5669
5670         if ((fs->flow_type & FLOW_EXT) &&
5671             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5672                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5673                 return -EOPNOTSUPP;
5674         }
5675
5676         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5677         switch (flow_type) {
5678         case SCTP_V4_FLOW:
5679         case TCP_V4_FLOW:
5680         case UDP_V4_FLOW:
5681                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5682                                                   unused_tuple);
5683                 break;
5684         case IP_USER_FLOW:
5685                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5686                                                unused_tuple);
5687                 break;
5688         case SCTP_V6_FLOW:
5689         case TCP_V6_FLOW:
5690         case UDP_V6_FLOW:
5691                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5692                                                   unused_tuple);
5693                 break;
5694         case IPV6_USER_FLOW:
5695                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5696                                                unused_tuple);
5697                 break;
5698         case ETHER_FLOW:
5699                 if (hdev->fd_cfg.fd_mode !=
5700                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5701                         dev_err(&hdev->pdev->dev,
5702                                 "ETHER_FLOW is not supported in current fd mode!\n");
5703                         return -EOPNOTSUPP;
5704                 }
5705
5706                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5707                                                  unused_tuple);
5708                 break;
5709         default:
5710                 dev_err(&hdev->pdev->dev,
5711                         "unsupported protocol type, protocol type = %#x\n",
5712                         flow_type);
5713                 return -EOPNOTSUPP;
5714         }
5715
5716         if (ret) {
5717                 dev_err(&hdev->pdev->dev,
5718                         "failed to check flow union tuple, ret = %d\n",
5719                         ret);
5720                 return ret;
5721         }
5722
5723         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5724 }
5725
5726 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5727 {
5728         struct hclge_fd_rule *rule = NULL;
5729         struct hlist_node *node2;
5730
5731         spin_lock_bh(&hdev->fd_rule_lock);
5732         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5733                 if (rule->location >= location)
5734                         break;
5735         }
5736
5737         spin_unlock_bh(&hdev->fd_rule_lock);
5738
5739         return rule && rule->location == location;
5740 }
5741
5742 /* make sure this is called with fd_rule_lock held */
5743 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5744                                      struct hclge_fd_rule *new_rule,
5745                                      u16 location,
5746                                      bool is_add)
5747 {
5748         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5749         struct hlist_node *node2;
5750
5751         if (is_add && !new_rule)
5752                 return -EINVAL;
5753
5754         hlist_for_each_entry_safe(rule, node2,
5755                                   &hdev->fd_rule_list, rule_node) {
5756                 if (rule->location >= location)
5757                         break;
5758                 parent = rule;
5759         }
5760
5761         if (rule && rule->location == location) {
5762                 hlist_del(&rule->rule_node);
5763                 kfree(rule);
5764                 hdev->hclge_fd_rule_num--;
5765
5766                 if (!is_add) {
5767                         if (!hdev->hclge_fd_rule_num)
5768                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5769                         clear_bit(location, hdev->fd_bmap);
5770
5771                         return 0;
5772                 }
5773         } else if (!is_add) {
5774                 dev_err(&hdev->pdev->dev,
5775                         "delete failed, rule %u does not exist\n",
5776                         location);
5777                 return -EINVAL;
5778         }
5779
5780         INIT_HLIST_NODE(&new_rule->rule_node);
5781
5782         if (parent)
5783                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5784         else
5785                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5786
5787         set_bit(location, hdev->fd_bmap);
5788         hdev->hclge_fd_rule_num++;
5789         hdev->fd_active_type = new_rule->rule_type;
5790
5791         return 0;
5792 }
5793
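/* Convert the ethtool flow spec into rule tuples and tuple masks,
 * stored in CPU byte order.
 */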
5794 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5795                               struct ethtool_rx_flow_spec *fs,
5796                               struct hclge_fd_rule *rule)
5797 {
5798         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5799
5800         switch (flow_type) {
5801         case SCTP_V4_FLOW:
5802         case TCP_V4_FLOW:
5803         case UDP_V4_FLOW:
5804                 rule->tuples.src_ip[IPV4_INDEX] =
5805                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5806                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5807                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5808
5809                 rule->tuples.dst_ip[IPV4_INDEX] =
5810                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5811                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5812                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5813
5814                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5815                 rule->tuples_mask.src_port =
5816                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5817
5818                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5819                 rule->tuples_mask.dst_port =
5820                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5821
5822                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5823                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5824
5825                 rule->tuples.ether_proto = ETH_P_IP;
5826                 rule->tuples_mask.ether_proto = 0xFFFF;
5827
5828                 break;
5829         case IP_USER_FLOW:
5830                 rule->tuples.src_ip[IPV4_INDEX] =
5831                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5832                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5833                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5834
5835                 rule->tuples.dst_ip[IPV4_INDEX] =
5836                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5837                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5838                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5839
5840                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5841                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5842
5843                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5844                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5845
5846                 rule->tuples.ether_proto = ETH_P_IP;
5847                 rule->tuples_mask.ether_proto = 0xFFFF;
5848
5849                 break;
5850         case SCTP_V6_FLOW:
5851         case TCP_V6_FLOW:
5852         case UDP_V6_FLOW:
5853                 be32_to_cpu_array(rule->tuples.src_ip,
5854                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5855                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5856                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5857
5858                 be32_to_cpu_array(rule->tuples.dst_ip,
5859                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5860                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5861                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5862
5863                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5864                 rule->tuples_mask.src_port =
5865                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5866
5867                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5868                 rule->tuples_mask.dst_port =
5869                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5870
5871                 rule->tuples.ether_proto = ETH_P_IPV6;
5872                 rule->tuples_mask.ether_proto = 0xFFFF;
5873
5874                 break;
5875         case IPV6_USER_FLOW:
5876                 be32_to_cpu_array(rule->tuples.src_ip,
5877                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5878                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5879                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5880
5881                 be32_to_cpu_array(rule->tuples.dst_ip,
5882                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5883                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5884                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5885
5886                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5887                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5888
5889                 rule->tuples.ether_proto = ETH_P_IPV6;
5890                 rule->tuples_mask.ether_proto = 0xFFFF;
5891
5892                 break;
5893         case ETHER_FLOW:
5894                 ether_addr_copy(rule->tuples.src_mac,
5895                                 fs->h_u.ether_spec.h_source);
5896                 ether_addr_copy(rule->tuples_mask.src_mac,
5897                                 fs->m_u.ether_spec.h_source);
5898
5899                 ether_addr_copy(rule->tuples.dst_mac,
5900                                 fs->h_u.ether_spec.h_dest);
5901                 ether_addr_copy(rule->tuples_mask.dst_mac,
5902                                 fs->m_u.ether_spec.h_dest);
5903
5904                 rule->tuples.ether_proto =
5905                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5906                 rule->tuples_mask.ether_proto =
5907                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5908
5909                 break;
5910         default:
5911                 return -EOPNOTSUPP;
5912         }
5913
5914         switch (flow_type) {
5915         case SCTP_V4_FLOW:
5916         case SCTP_V6_FLOW:
5917                 rule->tuples.ip_proto = IPPROTO_SCTP;
5918                 rule->tuples_mask.ip_proto = 0xFF;
5919                 break;
5920         case TCP_V4_FLOW:
5921         case TCP_V6_FLOW:
5922                 rule->tuples.ip_proto = IPPROTO_TCP;
5923                 rule->tuples_mask.ip_proto = 0xFF;
5924                 break;
5925         case UDP_V4_FLOW:
5926         case UDP_V6_FLOW:
5927                 rule->tuples.ip_proto = IPPROTO_UDP;
5928                 rule->tuples_mask.ip_proto = 0xFF;
5929                 break;
5930         default:
5931                 break;
5932         }
5933
5934         if (fs->flow_type & FLOW_EXT) {
5935                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5936                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5937         }
5938
5939         if (fs->flow_type & FLOW_MAC_EXT) {
5940                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5941                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5942         }
5943
5944         return 0;
5945 }
5946
5947 /* make sure this is called with fd_rule_lock held */
5948 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5949                                 struct hclge_fd_rule *rule)
5950 {
5951         int ret;
5952
5953         if (!rule) {
5954                 dev_err(&hdev->pdev->dev,
5955                         "The flow director rule is NULL\n");
5956                 return -EINVAL;
5957         }
5958
5959         /* it never fails here, so there is no need to check the return value */
5960         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5961
5962         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5963         if (ret)
5964                 goto clear_rule;
5965
5966         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5967         if (ret)
5968                 goto clear_rule;
5969
5970         return 0;
5971
5972 clear_rule:
5973         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5974         return ret;
5975 }
5976
5977 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5978 {
5979         struct hclge_vport *vport = hclge_get_vport(handle);
5980         struct hclge_dev *hdev = vport->back;
5981
5982         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5983 }
5984
5985 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5986                               struct ethtool_rxnfc *cmd)
5987 {
5988         struct hclge_vport *vport = hclge_get_vport(handle);
5989         struct hclge_dev *hdev = vport->back;
5990         u16 dst_vport_id = 0, q_index = 0;
5991         struct ethtool_rx_flow_spec *fs;
5992         struct hclge_fd_rule *rule;
5993         u32 unused = 0;
5994         u8 action;
5995         int ret;
5996
5997         if (!hnae3_dev_fd_supported(hdev)) {
5998                 dev_err(&hdev->pdev->dev,
5999                         "flow director is not supported\n");
6000                 return -EOPNOTSUPP;
6001         }
6002
6003         if (!hdev->fd_en) {
6004                 dev_err(&hdev->pdev->dev,
6005                         "please enable flow director first\n");
6006                 return -EOPNOTSUPP;
6007         }
6008
6009         if (hclge_is_cls_flower_active(handle)) {
6010                 dev_err(&hdev->pdev->dev,
6011                         "please delete all existing cls flower rules first\n");
6012                 return -EINVAL;
6013         }
6014
6015         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6016
6017         ret = hclge_fd_check_spec(hdev, fs, &unused);
6018         if (ret)
6019                 return ret;
6020
6021         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6022                 action = HCLGE_FD_ACTION_DROP_PACKET;
6023         } else {
6024                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6025                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6026                 u16 tqps;
6027
6028                 if (vf > hdev->num_req_vfs) {
6029                         dev_err(&hdev->pdev->dev,
6030                                 "Error: vf id (%u) > max vf num (%u)\n",
6031                                 vf, hdev->num_req_vfs);
6032                         return -EINVAL;
6033                 }
6034
6035                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6036                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6037
6038                 if (ring >= tqps) {
6039                         dev_err(&hdev->pdev->dev,
6040                                 "Error: queue id (%u) > max tqp num (%u)\n",
6041                                 ring, tqps - 1);
6042                         return -EINVAL;
6043                 }
6044
6045                 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6046                 q_index = ring;
6047         }
6048
6049         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6050         if (!rule)
6051                 return -ENOMEM;
6052
6053         ret = hclge_fd_get_tuple(hdev, fs, rule);
6054         if (ret) {
6055                 kfree(rule);
6056                 return ret;
6057         }
6058
6059         rule->flow_type = fs->flow_type;
6060         rule->location = fs->location;
6061         rule->unused_tuple = unused;
6062         rule->vf_id = dst_vport_id;
6063         rule->queue_id = q_index;
6064         rule->action = action;
6065         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6066
6067         /* to avoid rule conflict, when the user configures a rule by ethtool,
6068          * we need to clear all arfs rules
6069          */
6070         spin_lock_bh(&hdev->fd_rule_lock);
6071         hclge_clear_arfs_rules(handle);
6072
6073         ret = hclge_fd_config_rule(hdev, rule);
6074
6075         spin_unlock_bh(&hdev->fd_rule_lock);
6076
6077         return ret;
6078 }
6079
6080 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6081                               struct ethtool_rxnfc *cmd)
6082 {
6083         struct hclge_vport *vport = hclge_get_vport(handle);
6084         struct hclge_dev *hdev = vport->back;
6085         struct ethtool_rx_flow_spec *fs;
6086         int ret;
6087
6088         if (!hnae3_dev_fd_supported(hdev))
6089                 return -EOPNOTSUPP;
6090
6091         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6092
6093         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6094                 return -EINVAL;
6095
6096         if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6097             !hclge_fd_rule_exist(hdev, fs->location)) {
6098                 dev_err(&hdev->pdev->dev,
6099                         "Delete failed, rule %u does not exist\n", fs->location);
6100                 return -ENOENT;
6101         }
6102
6103         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6104                                    NULL, false);
6105         if (ret)
6106                 return ret;
6107
6108         spin_lock_bh(&hdev->fd_rule_lock);
6109         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6110
6111         spin_unlock_bh(&hdev->fd_rule_lock);
6112
6113         return ret;
6114 }
6115
6116 /* make sure this is called with fd_rule_lock held */
6117 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6118                                      bool clear_list)
6119 {
6120         struct hclge_vport *vport = hclge_get_vport(handle);
6121         struct hclge_dev *hdev = vport->back;
6122         struct hclge_fd_rule *rule;
6123         struct hlist_node *node;
6124         u16 location;
6125
6126         if (!hnae3_dev_fd_supported(hdev))
6127                 return;
6128
6129         for_each_set_bit(location, hdev->fd_bmap,
6130                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6131                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6132                                      NULL, false);
6133
6134         if (clear_list) {
6135                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6136                                           rule_node) {
6137                         hlist_del(&rule->rule_node);
6138                         kfree(rule);
6139                 }
6140                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6141                 hdev->hclge_fd_rule_num = 0;
6142                 bitmap_zero(hdev->fd_bmap,
6143                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6144         }
6145 }
6146
6147 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6148 {
6149         struct hclge_vport *vport = hclge_get_vport(handle);
6150         struct hclge_dev *hdev = vport->back;
6151         struct hclge_fd_rule *rule;
6152         struct hlist_node *node;
6153         int ret;
6154
6155         /* Return ok here, because reset error handling will check this
6156          * return value. If error is returned here, the reset process will
6157          * fail.
6158          */
6159         if (!hnae3_dev_fd_supported(hdev))
6160                 return 0;
6161
6162         /* if fd is disabled, it should not be restored during reset */
6163         if (!hdev->fd_en)
6164                 return 0;
6165
6166         spin_lock_bh(&hdev->fd_rule_lock);
6167         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6168                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6169                 if (!ret)
6170                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6171
6172                 if (ret) {
6173                         dev_warn(&hdev->pdev->dev,
6174                                  "Restore rule %u failed, remove it\n",
6175                                  rule->location);
6176                         clear_bit(rule->location, hdev->fd_bmap);
6177                         hlist_del(&rule->rule_node);
6178                         kfree(rule);
6179                         hdev->hclge_fd_rule_num--;
6180                 }
6181         }
6182
6183         if (hdev->hclge_fd_rule_num)
6184                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6185
6186         spin_unlock_bh(&hdev->fd_rule_lock);
6187
6188         return 0;
6189 }
6190
6191 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6192                                  struct ethtool_rxnfc *cmd)
6193 {
6194         struct hclge_vport *vport = hclge_get_vport(handle);
6195         struct hclge_dev *hdev = vport->back;
6196
6197         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6198                 return -EOPNOTSUPP;
6199
6200         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6201         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6202
6203         return 0;
6204 }
6205
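/* The hclge_fd_get_*_info() helpers below do the reverse of
 * hclge_fd_get_tuple(): they rebuild an ethtool spec and mask from a
 * stored rule, zeroing the mask of every field flagged in unused_tuple.
 */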
6206 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6207                                      struct ethtool_tcpip4_spec *spec,
6208                                      struct ethtool_tcpip4_spec *spec_mask)
6209 {
6210         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6211         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6212                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6213
6214         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6215         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6216                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6217
6218         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6219         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6220                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6221
6222         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6223         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6224                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6225
6226         spec->tos = rule->tuples.ip_tos;
6227         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6228                         0 : rule->tuples_mask.ip_tos;
6229 }
6230
6231 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6232                                   struct ethtool_usrip4_spec *spec,
6233                                   struct ethtool_usrip4_spec *spec_mask)
6234 {
6235         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6236         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6237                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6238
6239         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6240         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6241                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6242
6243         spec->tos = rule->tuples.ip_tos;
6244         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6245                         0 : rule->tuples_mask.ip_tos;
6246
6247         spec->proto = rule->tuples.ip_proto;
6248         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6249                         0 : rule->tuples_mask.ip_proto;
6250
6251         spec->ip_ver = ETH_RX_NFC_IP4;
6252 }
6253
6254 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6255                                      struct ethtool_tcpip6_spec *spec,
6256                                      struct ethtool_tcpip6_spec *spec_mask)
6257 {
6258         cpu_to_be32_array(spec->ip6src,
6259                           rule->tuples.src_ip, IPV6_SIZE);
6260         cpu_to_be32_array(spec->ip6dst,
6261                           rule->tuples.dst_ip, IPV6_SIZE);
6262         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6263                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6264         else
6265                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6266                                   IPV6_SIZE);
6267
6268         if (rule->unused_tuple & BIT(INNER_DST_IP))
6269                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6270         else
6271                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6272                                   IPV6_SIZE);
6273
6274         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6275         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6276                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6277
6278         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6279         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6280                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6281 }
6282
6283 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6284                                   struct ethtool_usrip6_spec *spec,
6285                                   struct ethtool_usrip6_spec *spec_mask)
6286 {
6287         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6288         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6289         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6290                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6291         else
6292                 cpu_to_be32_array(spec_mask->ip6src,
6293                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6294
6295         if (rule->unused_tuple & BIT(INNER_DST_IP))
6296                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6297         else
6298                 cpu_to_be32_array(spec_mask->ip6dst,
6299                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6300
6301         spec->l4_proto = rule->tuples.ip_proto;
6302         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6303                         0 : rule->tuples_mask.ip_proto;
6304 }
6305
6306 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6307                                     struct ethhdr *spec,
6308                                     struct ethhdr *spec_mask)
6309 {
6310         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6311         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6312
6313         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6314                 eth_zero_addr(spec_mask->h_source);
6315         else
6316                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6317
6318         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6319                 eth_zero_addr(spec_mask->h_dest);
6320         else
6321                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6322
6323         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6324         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6325                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6326 }
6327
6328 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6329                                   struct hclge_fd_rule *rule)
6330 {
6331         if (fs->flow_type & FLOW_EXT) {
6332                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6333                 fs->m_ext.vlan_tci =
6334                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6335                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6336         }
6337
6338         if (fs->flow_type & FLOW_MAC_EXT) {
6339                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6340                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6341                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6342                 else
6343                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6344                                         rule->tuples_mask.dst_mac);
6345         }
6346 }
6347
6348 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6349                                   struct ethtool_rxnfc *cmd)
6350 {
6351         struct hclge_vport *vport = hclge_get_vport(handle);
6352         struct hclge_fd_rule *rule = NULL;
6353         struct hclge_dev *hdev = vport->back;
6354         struct ethtool_rx_flow_spec *fs;
6355         struct hlist_node *node2;
6356
6357         if (!hnae3_dev_fd_supported(hdev))
6358                 return -EOPNOTSUPP;
6359
6360         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6361
6362         spin_lock_bh(&hdev->fd_rule_lock);
6363
6364         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6365                 if (rule->location >= fs->location)
6366                         break;
6367         }
6368
6369         if (!rule || fs->location != rule->location) {
6370                 spin_unlock_bh(&hdev->fd_rule_lock);
6371
6372                 return -ENOENT;
6373         }
6374
6375         fs->flow_type = rule->flow_type;
6376         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6377         case SCTP_V4_FLOW:
6378         case TCP_V4_FLOW:
6379         case UDP_V4_FLOW:
6380                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6381                                          &fs->m_u.tcp_ip4_spec);
6382                 break;
6383         case IP_USER_FLOW:
6384                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6385                                       &fs->m_u.usr_ip4_spec);
6386                 break;
6387         case SCTP_V6_FLOW:
6388         case TCP_V6_FLOW:
6389         case UDP_V6_FLOW:
6390                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6391                                          &fs->m_u.tcp_ip6_spec);
6392                 break;
6393         case IPV6_USER_FLOW:
6394                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6395                                       &fs->m_u.usr_ip6_spec);
6396                 break;
6397         /* The flow type of the fd rule has been checked before adding it to
6398          * the rule list. As other flow types have been handled, it must be
6399          * ETHER_FLOW for the default case
6399          * for the default case
6400          */
6401         default:
6402                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6403                                         &fs->m_u.ether_spec);
6404                 break;
6405         }
6406
6407         hclge_fd_get_ext_info(fs, rule);
6408
6409         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6410                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6411         } else {
6412                 u64 vf_id;
6413
6414                 fs->ring_cookie = rule->queue_id;
6415                 vf_id = rule->vf_id;
6416                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6417                 fs->ring_cookie |= vf_id;
6418         }
6419
6420         spin_unlock_bh(&hdev->fd_rule_lock);
6421
6422         return 0;
6423 }
6424
6425 static int hclge_get_all_rules(struct hnae3_handle *handle,
6426                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6427 {
6428         struct hclge_vport *vport = hclge_get_vport(handle);
6429         struct hclge_dev *hdev = vport->back;
6430         struct hclge_fd_rule *rule;
6431         struct hlist_node *node2;
6432         int cnt = 0;
6433
6434         if (!hnae3_dev_fd_supported(hdev))
6435                 return -EOPNOTSUPP;
6436
6437         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6438
6439         spin_lock_bh(&hdev->fd_rule_lock);
6440         hlist_for_each_entry_safe(rule, node2,
6441                                   &hdev->fd_rule_list, rule_node) {
6442                 if (cnt == cmd->rule_cnt) {
6443                         spin_unlock_bh(&hdev->fd_rule_lock);
6444                         return -EMSGSIZE;
6445                 }
6446
6447                 rule_locs[cnt] = rule->location;
6448                 cnt++;
6449         }
6450
6451         spin_unlock_bh(&hdev->fd_rule_lock);
6452
6453         cmd->rule_cnt = cnt;
6454
6455         return 0;
6456 }
6457
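/* Extract the tuples used by aRFS rules from the flow keys dissected by
 * the stack. For IPv4, only the last word of the ip arrays is used.
 */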
6458 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6459                                      struct hclge_fd_rule_tuples *tuples)
6460 {
6461 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6462 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6463
6464         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6465         tuples->ip_proto = fkeys->basic.ip_proto;
6466         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6467
6468         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6469                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6470                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6471         } else {
6472                 int i;
6473
6474                 for (i = 0; i < IPV6_SIZE; i++) {
6475                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6476                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6477                 }
6478         }
6479 }
6480
6481 /* traverse all rules, check whether an existing rule has the same tuples */
6482 static struct hclge_fd_rule *
6483 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6484                           const struct hclge_fd_rule_tuples *tuples)
6485 {
6486         struct hclge_fd_rule *rule = NULL;
6487         struct hlist_node *node;
6488
6489         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6490                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6491                         return rule;
6492         }
6493
6494         return NULL;
6495 }
6496
6497 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6498                                      struct hclge_fd_rule *rule)
6499 {
6500         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6501                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6502                              BIT(INNER_SRC_PORT);
6503         rule->action = 0;
6504         rule->vf_id = 0;
6505         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6506         if (tuples->ether_proto == ETH_P_IP) {
6507                 if (tuples->ip_proto == IPPROTO_TCP)
6508                         rule->flow_type = TCP_V4_FLOW;
6509                 else
6510                         rule->flow_type = UDP_V4_FLOW;
6511         } else {
6512                 if (tuples->ip_proto == IPPROTO_TCP)
6513                         rule->flow_type = TCP_V6_FLOW;
6514                 else
6515                         rule->flow_type = UDP_V6_FLOW;
6516         }
6517         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6518         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6519 }
6520
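/* aRFS flow steering entry: add or update a flow director rule for the
 * dissected flow and return its location, which is used as the filter id.
 */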
6521 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6522                                       u16 flow_id, struct flow_keys *fkeys)
6523 {
6524         struct hclge_vport *vport = hclge_get_vport(handle);
6525         struct hclge_fd_rule_tuples new_tuples = {};
6526         struct hclge_dev *hdev = vport->back;
6527         struct hclge_fd_rule *rule;
6528         u16 tmp_queue_id;
6529         u16 bit_id;
6530         int ret;
6531
6532         if (!hnae3_dev_fd_supported(hdev))
6533                 return -EOPNOTSUPP;
6534
6535         /* when there is already an fd rule added by the user,
6536          * arfs should not work
6537          */
6538         spin_lock_bh(&hdev->fd_rule_lock);
6539         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6540             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6541                 spin_unlock_bh(&hdev->fd_rule_lock);
6542                 return -EOPNOTSUPP;
6543         }
6544
6545         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6546
6547         /* check whether a flow director filter already exists for this flow;
6548          * if not, create a new filter for it;
6549          * if a filter exists with a different queue id, modify the filter;
6550          * if a filter exists with the same queue id, do nothing
6551          */
6552         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6553         if (!rule) {
6554                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6555                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6556                         spin_unlock_bh(&hdev->fd_rule_lock);
6557                         return -ENOSPC;
6558                 }
6559
6560                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6561                 if (!rule) {
6562                         spin_unlock_bh(&hdev->fd_rule_lock);
6563                         return -ENOMEM;
6564                 }
6565
6566                 set_bit(bit_id, hdev->fd_bmap);
6567                 rule->location = bit_id;
6568                 rule->arfs.flow_id = flow_id;
6569                 rule->queue_id = queue_id;
6570                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6571                 ret = hclge_fd_config_rule(hdev, rule);
6572
6573                 spin_unlock_bh(&hdev->fd_rule_lock);
6574
6575                 if (ret)
6576                         return ret;
6577
6578                 return rule->location;
6579         }
6580
6581         spin_unlock_bh(&hdev->fd_rule_lock);
6582
6583         if (rule->queue_id == queue_id)
6584                 return rule->location;
6585
6586         tmp_queue_id = rule->queue_id;
6587         rule->queue_id = queue_id;
6588         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6589         if (ret) {
6590                 rule->queue_id = tmp_queue_id;
6591                 return ret;
6592         }
6593
6594         return rule->location;
6595 }
6596
6597 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6598 {
6599 #ifdef CONFIG_RFS_ACCEL
6600         struct hnae3_handle *handle = &hdev->vport[0].nic;
6601         struct hclge_fd_rule *rule;
6602         struct hlist_node *node;
6603         HLIST_HEAD(del_list);
6604
6605         spin_lock_bh(&hdev->fd_rule_lock);
6606         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6607                 spin_unlock_bh(&hdev->fd_rule_lock);
6608                 return;
6609         }
6610         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6611                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6612                                         rule->arfs.flow_id, rule->location)) {
6613                         hlist_del_init(&rule->rule_node);
6614                         hlist_add_head(&rule->rule_node, &del_list);
6615                         hdev->hclge_fd_rule_num--;
6616                         clear_bit(rule->location, hdev->fd_bmap);
6617                 }
6618         }
6619         spin_unlock_bh(&hdev->fd_rule_lock);
6620
6621         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6622                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6623                                      rule->location, NULL, false);
6624                 kfree(rule);
6625         }
6626 #endif
6627 }
6628
6629 /* make sure this is called with fd_rule_lock held */
6630 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6631 {
6632 #ifdef CONFIG_RFS_ACCEL
6633         struct hclge_vport *vport = hclge_get_vport(handle);
6634         struct hclge_dev *hdev = vport->back;
6635
6636         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6637                 hclge_del_all_fd_entries(handle, true);
6638 #endif
6639 }
6640
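/* The hclge_get_cls_key_*() helpers below copy the matched keys of a
 * tc flower rule into the rule tuples, and mark every key that is not
 * matched in unused_tuple.
 */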
6641 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6642                                     struct hclge_fd_rule *rule)
6643 {
6644         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6645                 struct flow_match_basic match;
6646                 u16 ethtype_key, ethtype_mask;
6647
6648                 flow_rule_match_basic(flow, &match);
6649                 ethtype_key = ntohs(match.key->n_proto);
6650                 ethtype_mask = ntohs(match.mask->n_proto);
6651
6652                 if (ethtype_key == ETH_P_ALL) {
6653                         ethtype_key = 0;
6654                         ethtype_mask = 0;
6655                 }
6656                 rule->tuples.ether_proto = ethtype_key;
6657                 rule->tuples_mask.ether_proto = ethtype_mask;
6658                 rule->tuples.ip_proto = match.key->ip_proto;
6659                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6660         } else {
6661                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6662                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6663         }
6664 }
6665
6666 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6667                                   struct hclge_fd_rule *rule)
6668 {
6669         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6670                 struct flow_match_eth_addrs match;
6671
6672                 flow_rule_match_eth_addrs(flow, &match);
6673                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6674                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6675                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6676                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6677         } else {
6678                 rule->unused_tuple |= BIT(INNER_DST_MAC);
6679                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6680         }
6681 }
6682
6683 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6684                                    struct hclge_fd_rule *rule)
6685 {
6686         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6687                 struct flow_match_vlan match;
6688
6689                 flow_rule_match_vlan(flow, &match);
6690                 rule->tuples.vlan_tag1 = match.key->vlan_id |
6691                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6692                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6693                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6694         } else {
6695                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6696         }
6697 }
6698
6699 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6700                                  struct hclge_fd_rule *rule)
6701 {
6702         u16 addr_type = 0;
6703
6704         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6705                 struct flow_match_control match;
6706
6707                 flow_rule_match_control(flow, &match);
6708                 addr_type = match.key->addr_type;
6709         }
6710
6711         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6712                 struct flow_match_ipv4_addrs match;
6713
6714                 flow_rule_match_ipv4_addrs(flow, &match);
6715                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6716                 rule->tuples_mask.src_ip[IPV4_INDEX] =
6717                                                 be32_to_cpu(match.mask->src);
6718                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6719                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6720                                                 be32_to_cpu(match.mask->dst);
6721         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6722                 struct flow_match_ipv6_addrs match;
6723
6724                 flow_rule_match_ipv6_addrs(flow, &match);
6725                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6726                                   IPV6_SIZE);
6727                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6728                                   match.mask->src.s6_addr32, IPV6_SIZE);
6729                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6730                                   IPV6_SIZE);
6731                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6732                                   match.mask->dst.s6_addr32, IPV6_SIZE);
6733         } else {
6734                 rule->unused_tuple |= BIT(INNER_SRC_IP);
6735                 rule->unused_tuple |= BIT(INNER_DST_IP);
6736         }
6737 }
6738
6739 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6740                                    struct hclge_fd_rule *rule)
6741 {
6742         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6743                 struct flow_match_ports match;
6744
6745                 flow_rule_match_ports(flow, &match);
6746
6747                 rule->tuples.src_port = be16_to_cpu(match.key->src);
6748                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6749                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6750                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6751         } else {
6752                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6753                 rule->unused_tuple |= BIT(INNER_DST_PORT);
6754         }
6755 }
6756
6757 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6758                                   struct flow_cls_offload *cls_flower,
6759                                   struct hclge_fd_rule *rule)
6760 {
6761         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6762         struct flow_dissector *dissector = flow->match.dissector;
6763
6764         if (dissector->used_keys &
6765             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6766               BIT(FLOW_DISSECTOR_KEY_BASIC) |
6767               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6768               BIT(FLOW_DISSECTOR_KEY_VLAN) |
6769               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6770               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6771               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6772                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6773                         dissector->used_keys);
6774                 return -EOPNOTSUPP;
6775         }
6776
6777         hclge_get_cls_key_basic(flow, rule);
6778         hclge_get_cls_key_mac(flow, rule);
6779         hclge_get_cls_key_vlan(flow, rule);
6780         hclge_get_cls_key_ip(flow, rule);
6781         hclge_get_cls_key_port(flow, rule);
6782
6783         return 0;
6784 }
6785
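/* A tc flower rule uses its prio as the rule location (prio - 1), so the
 * tc must be valid and prio must be within the stage 1 rule range and not
 * already in use.
 */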
6786 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6787                                   struct flow_cls_offload *cls_flower, int tc)
6788 {
6789         u32 prio = cls_flower->common.prio;
6790
6791         if (tc < 0 || tc > hdev->tc_max) {
6792                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6793                 return -EINVAL;
6794         }
6795
6796         if (prio == 0 ||
6797             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6798                 dev_err(&hdev->pdev->dev,
6799                         "prio %u should be in range [1, %u]\n",
6800                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6801                 return -EINVAL;
6802         }
6803
6804         if (test_bit(prio - 1, hdev->fd_bmap)) {
6805                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6806                 return -EINVAL;
6807         }
6808         return 0;
6809 }
6810
6811 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6812                                 struct flow_cls_offload *cls_flower,
6813                                 int tc)
6814 {
6815         struct hclge_vport *vport = hclge_get_vport(handle);
6816         struct hclge_dev *hdev = vport->back;
6817         struct hclge_fd_rule *rule;
6818         int ret;
6819
6820         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6821                 dev_err(&hdev->pdev->dev,
6822                         "please remove all existing fd rules via ethtool first\n");
6823                 return -EINVAL;
6824         }
6825
6826         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6827         if (ret) {
6828                 dev_err(&hdev->pdev->dev,
6829                         "failed to check cls flower params, ret = %d\n", ret);
6830                 return ret;
6831         }
6832
6833         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6834         if (!rule)
6835                 return -ENOMEM;
6836
6837         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6838         if (ret)
6839                 goto err;
6840
6841         rule->action = HCLGE_FD_ACTION_SELECT_TC;
6842         rule->cls_flower.tc = tc;
6843         rule->location = cls_flower->common.prio - 1;
6844         rule->vf_id = 0;
6845         rule->cls_flower.cookie = cls_flower->cookie;
6846         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6847
6848         spin_lock_bh(&hdev->fd_rule_lock);
6849         hclge_clear_arfs_rules(handle);
6850
6851         ret = hclge_fd_config_rule(hdev, rule);
6852
6853         spin_unlock_bh(&hdev->fd_rule_lock);
6854
6855         if (ret) {
6856                 dev_err(&hdev->pdev->dev,
6857                         "failed to add cls flower rule, ret = %d\n", ret);
6858                 goto err;
6859         }
6860
6861         return 0;
6862 err:
6863         kfree(rule);
6864         return ret;
6865 }
6866
6867 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6868                                                    unsigned long cookie)
6869 {
6870         struct hclge_fd_rule *rule;
6871         struct hlist_node *node;
6872
6873         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6874                 if (rule->cls_flower.cookie == cookie)
6875                         return rule;
6876         }
6877
6878         return NULL;
6879 }
6880
6881 static int hclge_del_cls_flower(struct hnae3_handle *handle,
6882                                 struct flow_cls_offload *cls_flower)
6883 {
6884         struct hclge_vport *vport = hclge_get_vport(handle);
6885         struct hclge_dev *hdev = vport->back;
6886         struct hclge_fd_rule *rule;
6887         int ret;
6888
6889         spin_lock_bh(&hdev->fd_rule_lock);
6890
6891         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
6892         if (!rule) {
6893                 spin_unlock_bh(&hdev->fd_rule_lock);
6894                 return -EINVAL;
6895         }
6896
6897         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6898                                    NULL, false);
6899         if (ret) {
6900                 dev_err(&hdev->pdev->dev,
6901                         "failed to delete cls flower rule %u, ret = %d\n",
6902                         rule->location, ret);
6903                 spin_unlock_bh(&hdev->fd_rule_lock);
6904                 return ret;
6905         }
6906
6907         ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6908         if (ret) {
6909                 dev_err(&hdev->pdev->dev,
6910                         "failed to delete cls flower rule %u in list, ret = %d\n",
6911                         rule->location, ret);
6912                 spin_unlock_bh(&hdev->fd_rule_lock);
6913                 return ret;
6914         }
6915
6916         spin_unlock_bh(&hdev->fd_rule_lock);
6917
6918         return 0;
6919 }
6920
6921 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6922 {
6923         struct hclge_vport *vport = hclge_get_vport(handle);
6924         struct hclge_dev *hdev = vport->back;
6925
6926         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6927                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6928 }
6929
6930 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6931 {
6932         struct hclge_vport *vport = hclge_get_vport(handle);
6933         struct hclge_dev *hdev = vport->back;
6934
6935         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6936 }
6937
6938 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6939 {
6940         struct hclge_vport *vport = hclge_get_vport(handle);
6941         struct hclge_dev *hdev = vport->back;
6942
6943         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6944 }
6945
6946 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6947 {
6948         struct hclge_vport *vport = hclge_get_vport(handle);
6949         struct hclge_dev *hdev = vport->back;
6950
6951         return hdev->rst_stats.hw_reset_done_cnt;
6952 }
6953
6954 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6955 {
6956         struct hclge_vport *vport = hclge_get_vport(handle);
6957         struct hclge_dev *hdev = vport->back;
6958         bool clear;
6959
6960         hdev->fd_en = enable;
6961         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6962
6963         if (!enable) {
6964                 spin_lock_bh(&hdev->fd_rule_lock);
6965                 hclge_del_all_fd_entries(handle, clear);
6966                 spin_unlock_bh(&hdev->fd_rule_lock);
6967         } else {
6968                 hclge_restore_fd_entries(handle);
6969         }
6970 }
6971
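/* Enable or disable the MAC. When enabling, turn on TX/RX, padding, FCS
 * insertion/checking/stripping and oversize truncation; when disabling,
 * all of these bits are left cleared.
 */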
6972 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6973 {
6974         struct hclge_desc desc;
6975         struct hclge_config_mac_mode_cmd *req =
6976                 (struct hclge_config_mac_mode_cmd *)desc.data;
6977         u32 loop_en = 0;
6978         int ret;
6979
6980         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6981
6982         if (enable) {
6983                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6984                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6985                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6986                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6987                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6988                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6989                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6990                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6991                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6992                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6993         }
6994
6995         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6996
6997         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6998         if (ret)
6999                 dev_err(&hdev->pdev->dev,
7000                         "mac enable fail, ret =%d.\n", ret);
7001 }
7002
7003 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7004                                      u8 switch_param, u8 param_mask)
7005 {
7006         struct hclge_mac_vlan_switch_cmd *req;
7007         struct hclge_desc desc;
7008         u32 func_id;
7009         int ret;
7010
7011         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7012         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7013
7014         /* read current config parameter */
7015         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7016                                    true);
7017         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7018         req->func_id = cpu_to_le32(func_id);
7019
7020         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7021         if (ret) {
7022                 dev_err(&hdev->pdev->dev,
7023                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7024                 return ret;
7025         }
7026
7027         /* modify and write new config parameter */
7028         hclge_cmd_reuse_desc(&desc, false);
7029         req->switch_param = (req->switch_param & param_mask) | switch_param;
7030         req->param_mask = param_mask;
7031
7032         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7033         if (ret)
7034                 dev_err(&hdev->pdev->dev,
7035                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7036         return ret;
7037 }
7038
7039 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7040                                        int link_ret)
7041 {
7042 #define HCLGE_PHY_LINK_STATUS_NUM  200
7043
7044         struct phy_device *phydev = hdev->hw.mac.phydev;
7045         int i = 0;
7046         int ret;
7047
7048         do {
7049                 ret = phy_read_status(phydev);
7050                 if (ret) {
7051                         dev_err(&hdev->pdev->dev,
7052                                 "phy update link status fail, ret = %d\n", ret);
7053                         return;
7054                 }
7055
7056                 if (phydev->link == link_ret)
7057                         break;
7058
7059                 msleep(HCLGE_LINK_STATUS_MS);
7060         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7061 }
7062
7063 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7064 {
7065 #define HCLGE_MAC_LINK_STATUS_NUM  100
7066
7067         int link_status;
7068         int i = 0;
7069         int ret;
7070
7071         do {
7072                 ret = hclge_get_mac_link_status(hdev, &link_status);
7073                 if (ret)
7074                         return ret;
7075                 if (link_status == link_ret)
7076                         return 0;
7077
7078                 msleep(HCLGE_LINK_STATUS_MS);
7079         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7080         return -EBUSY;
7081 }
7082
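/* Wait for the MAC (and optionally the PHY) link to reach the expected
 * up/down state, polling every HCLGE_LINK_STATUS_MS ms. Returns -EBUSY
 * if the MAC link does not settle in time.
 */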
7083 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7084                                           bool is_phy)
7085 {
7086         int link_ret;
7087
7088         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7089
7090         if (is_phy)
7091                 hclge_phy_link_status_wait(hdev, link_ret);
7092
7093         return hclge_mac_link_status_wait(hdev, link_ret);
7094 }
7095
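/* Configure MAC (app) loopback with a read-modify-write of the MAC mode
 * config: read the current value, update HCLGE_MAC_APP_LP_B and write
 * the descriptor back otherwise unchanged.
 */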
7096 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7097 {
7098         struct hclge_config_mac_mode_cmd *req;
7099         struct hclge_desc desc;
7100         u32 loop_en;
7101         int ret;
7102
7103         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7104         /* 1 Read out the MAC mode config at first */
7105         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7106         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7107         if (ret) {
7108                 dev_err(&hdev->pdev->dev,
7109                         "mac loopback get fail, ret =%d.\n", ret);
7110                 return ret;
7111         }
7112
7113         /* 2 Then setup the loopback flag */
7114         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7115         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7116
7117         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7118
7119         /* 3 Config mac work mode with loopback flag
7120          * and its original configuration parameters
7121          */
7122         hclge_cmd_reuse_desc(&desc, false);
7123         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7124         if (ret)
7125                 dev_err(&hdev->pdev->dev,
7126                         "mac loopback set fail, ret =%d.\n", ret);
7127         return ret;
7128 }
7129
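/* Configure serial or parallel serdes loopback through firmware, then
 * poll the same command descriptor until the firmware reports completion
 * (DONE bit) or the retry budget is exhausted.
 */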
7130 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7131                                      enum hnae3_loop loop_mode)
7132 {
7133 #define HCLGE_SERDES_RETRY_MS   10
7134 #define HCLGE_SERDES_RETRY_NUM  100
7135
7136         struct hclge_serdes_lb_cmd *req;
7137         struct hclge_desc desc;
7138         int ret, i = 0;
7139         u8 loop_mode_b;
7140
7141         req = (struct hclge_serdes_lb_cmd *)desc.data;
7142         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7143
7144         switch (loop_mode) {
7145         case HNAE3_LOOP_SERIAL_SERDES:
7146                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7147                 break;
7148         case HNAE3_LOOP_PARALLEL_SERDES:
7149                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7150                 break;
7151         default:
7152                 dev_err(&hdev->pdev->dev,
7153                         "unsupported serdes loopback mode %d\n", loop_mode);
7154                 return -ENOTSUPP;
7155         }
7156
7157         if (en) {
7158                 req->enable = loop_mode_b;
7159                 req->mask = loop_mode_b;
7160         } else {
7161                 req->mask = loop_mode_b;
7162         }
7163
7164         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165         if (ret) {
7166                 dev_err(&hdev->pdev->dev,
7167                         "serdes loopback set fail, ret = %d\n", ret);
7168                 return ret;
7169         }
7170
7171         do {
7172                 msleep(HCLGE_SERDES_RETRY_MS);
7173                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7174                                            true);
7175                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7176                 if (ret) {
7177                         dev_err(&hdev->pdev->dev,
7178                                 "serdes loopback get fail, ret = %d\n");
7179                         return ret;
7180                 }
7181         } while (++i < HCLGE_SERDES_RETRY_NUM &&
7182                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
7183
7184         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7185                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7186                 return -EBUSY;
7187         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7188                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7189                 return -EIO;
7190         }
7191         return ret;
7192 }
7193
7194 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7195                                      enum hnae3_loop loop_mode)
7196 {
7197         int ret;
7198
7199         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7200         if (ret)
7201                 return ret;
7202
7203         hclge_cfg_mac_mode(hdev, en);
7204
7205         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7206         if (ret)
7207                 dev_err(&hdev->pdev->dev,
7208                         "serdes loopback config mac mode timeout\n");
7209
7210         return ret;
7211 }
7212
7213 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7214                                      struct phy_device *phydev)
7215 {
7216         int ret;
7217
7218         if (!phydev->suspended) {
7219                 ret = phy_suspend(phydev);
7220                 if (ret)
7221                         return ret;
7222         }
7223
7224         ret = phy_resume(phydev);
7225         if (ret)
7226                 return ret;
7227
7228         return phy_loopback(phydev, true);
7229 }
7230
7231 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7232                                       struct phy_device *phydev)
7233 {
7234         int ret;
7235
7236         ret = phy_loopback(phydev, false);
7237         if (ret)
7238                 return ret;
7239
7240         return phy_suspend(phydev);
7241 }
7242
7243 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7244 {
7245         struct phy_device *phydev = hdev->hw.mac.phydev;
7246         int ret;
7247
7248         if (!phydev)
7249                 return -ENOTSUPP;
7250
7251         if (en)
7252                 ret = hclge_enable_phy_loopback(hdev, phydev);
7253         else
7254                 ret = hclge_disable_phy_loopback(hdev, phydev);
7255         if (ret) {
7256                 dev_err(&hdev->pdev->dev,
7257                         "set phy loopback fail, ret = %d\n", ret);
7258                 return ret;
7259         }
7260
7261         hclge_cfg_mac_mode(hdev, en);
7262
7263         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7264         if (ret)
7265                 dev_err(&hdev->pdev->dev,
7266                         "phy loopback config mac mode timeout\n");
7267
7268         return ret;
7269 }
7270
7271 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7272                             int stream_id, bool enable)
7273 {
7274         struct hclge_desc desc;
7275         struct hclge_cfg_com_tqp_queue_cmd *req =
7276                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7277         int ret;
7278
7279         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7280         req->tqp_id = cpu_to_le16(tqp_id);
7281         req->stream_id = cpu_to_le16(stream_id);
7282         if (enable)
7283                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7284
7285         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7286         if (ret)
7287                 dev_err(&hdev->pdev->dev,
7288                         "Tqp enable fail, status =%d.\n", ret);
7289         return ret;
7290 }
7291
7292 static int hclge_set_loopback(struct hnae3_handle *handle,
7293                               enum hnae3_loop loop_mode, bool en)
7294 {
7295         struct hclge_vport *vport = hclge_get_vport(handle);
7296         struct hnae3_knic_private_info *kinfo;
7297         struct hclge_dev *hdev = vport->back;
7298         int i, ret;
7299
7300         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7301          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7302          * the same, the packets are looped back in the SSU. If SSU loopback
7303          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7304          */
7305         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7306                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7307
7308                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7309                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7310                 if (ret)
7311                         return ret;
7312         }
7313
7314         switch (loop_mode) {
7315         case HNAE3_LOOP_APP:
7316                 ret = hclge_set_app_loopback(hdev, en);
7317                 break;
7318         case HNAE3_LOOP_SERIAL_SERDES:
7319         case HNAE3_LOOP_PARALLEL_SERDES:
7320                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7321                 break;
7322         case HNAE3_LOOP_PHY:
7323                 ret = hclge_set_phy_loopback(hdev, en);
7324                 break;
7325         default:
7326                 ret = -ENOTSUPP;
7327                 dev_err(&hdev->pdev->dev,
7328                         "loop_mode %d is not supported\n", loop_mode);
7329                 break;
7330         }
7331
7332         if (ret)
7333                 return ret;
7334
7335         kinfo = &vport->nic.kinfo;
7336         for (i = 0; i < kinfo->num_tqps; i++) {
7337                 ret = hclge_tqp_enable(hdev, i, 0, en);
7338                 if (ret)
7339                         return ret;
7340         }
7341
7342         return 0;
7343 }
7344
7345 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7346 {
7347         int ret;
7348
7349         ret = hclge_set_app_loopback(hdev, false);
7350         if (ret)
7351                 return ret;
7352
7353         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7354         if (ret)
7355                 return ret;
7356
7357         return hclge_cfg_serdes_loopback(hdev, false,
7358                                          HNAE3_LOOP_PARALLEL_SERDES);
7359 }
7360
7361 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7362 {
7363         struct hclge_vport *vport = hclge_get_vport(handle);
7364         struct hnae3_knic_private_info *kinfo;
7365         struct hnae3_queue *queue;
7366         struct hclge_tqp *tqp;
7367         int i;
7368
7369         kinfo = &vport->nic.kinfo;
7370         for (i = 0; i < kinfo->num_tqps; i++) {
7371                 queue = handle->kinfo.tqp[i];
7372                 tqp = container_of(queue, struct hclge_tqp, q);
7373                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7374         }
7375 }
7376
7377 static void hclge_flush_link_update(struct hclge_dev *hdev)
7378 {
7379 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7380
7381         unsigned long last = hdev->serv_processed_cnt;
7382         int i = 0;
7383
7384         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7385                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7386                last == hdev->serv_processed_cnt)
7387                 usleep_range(1, 1);
7388 }
7389
7390 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7391 {
7392         struct hclge_vport *vport = hclge_get_vport(handle);
7393         struct hclge_dev *hdev = vport->back;
7394
7395         if (enable) {
7396                 hclge_task_schedule(hdev, 0);
7397         } else {
7398                 /* Set the DOWN flag here to disable link updating */
7399                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7400
7401                 /* flush memory to make sure DOWN is seen by service task */
7402                 smp_mb__before_atomic();
7403                 hclge_flush_link_update(hdev);
7404         }
7405 }
7406
7407 static int hclge_ae_start(struct hnae3_handle *handle)
7408 {
7409         struct hclge_vport *vport = hclge_get_vport(handle);
7410         struct hclge_dev *hdev = vport->back;
7411
7412         /* mac enable */
7413         hclge_cfg_mac_mode(hdev, true);
7414         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7415         hdev->hw.mac.link = 0;
7416
7417         /* reset tqp stats */
7418         hclge_reset_tqp_stats(handle);
7419
7420         hclge_mac_start_phy(hdev);
7421
7422         return 0;
7423 }
7424
7425 static void hclge_ae_stop(struct hnae3_handle *handle)
7426 {
7427         struct hclge_vport *vport = hclge_get_vport(handle);
7428         struct hclge_dev *hdev = vport->back;
7429         int i;
7430
7431         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7432         spin_lock_bh(&hdev->fd_rule_lock);
7433         hclge_clear_arfs_rules(handle);
7434         spin_unlock_bh(&hdev->fd_rule_lock);
7435
7436         /* If it is not a PF reset, the firmware will disable the MAC,
7437          * so we only need to stop the PHY here.
7438          */
7439         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7440             hdev->reset_type != HNAE3_FUNC_RESET) {
7441                 hclge_mac_stop_phy(hdev);
7442                 hclge_update_link_status(hdev);
7443                 return;
7444         }
7445
7446         for (i = 0; i < handle->kinfo.num_tqps; i++)
7447                 hclge_reset_tqp(handle, i);
7448
7449         hclge_config_mac_tnl_int(hdev, false);
7450
7451         /* Mac disable */
7452         hclge_cfg_mac_mode(hdev, false);
7453
7454         hclge_mac_stop_phy(hdev);
7455
7456         /* reset tqp stats */
7457         hclge_reset_tqp_stats(handle);
7458         hclge_update_link_status(hdev);
7459 }
7460
7461 int hclge_vport_start(struct hclge_vport *vport)
7462 {
7463         struct hclge_dev *hdev = vport->back;
7464
7465         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7466         vport->last_active_jiffies = jiffies;
7467
7468         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7469                 if (vport->vport_id) {
7470                         hclge_restore_mac_table_common(vport);
7471                         hclge_restore_vport_vlan_table(vport);
7472                 } else {
7473                         hclge_restore_hw_table(hdev);
7474                 }
7475         }
7476
7477         clear_bit(vport->vport_id, hdev->vport_config_block);
7478
7479         return 0;
7480 }
7481
7482 void hclge_vport_stop(struct hclge_vport *vport)
7483 {
7484         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7485 }
7486
7487 static int hclge_client_start(struct hnae3_handle *handle)
7488 {
7489         struct hclge_vport *vport = hclge_get_vport(handle);
7490
7491         return hclge_vport_start(vport);
7492 }
7493
7494 static void hclge_client_stop(struct hnae3_handle *handle)
7495 {
7496         struct hclge_vport *vport = hclge_get_vport(handle);
7497
7498         hclge_vport_stop(vport);
7499 }
7500
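/* Convert the firmware response of a MAC-VLAN table add/remove/lookup
 * command into an errno: 0 on success, -ENOSPC on table overflow,
 * -ENOENT when the entry is missing, otherwise -EIO (or -EINVAL for an
 * unknown opcode).
 */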
7501 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7502                                          u16 cmdq_resp, u8  resp_code,
7503                                          enum hclge_mac_vlan_tbl_opcode op)
7504 {
7505         struct hclge_dev *hdev = vport->back;
7506
7507         if (cmdq_resp) {
7508                 dev_err(&hdev->pdev->dev,
7509                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7510                         cmdq_resp);
7511                 return -EIO;
7512         }
7513
7514         if (op == HCLGE_MAC_VLAN_ADD) {
7515                 if (!resp_code || resp_code == 1)
7516                         return 0;
7517                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7518                          resp_code == HCLGE_ADD_MC_OVERFLOW)
7519                         return -ENOSPC;
7520
7521                 dev_err(&hdev->pdev->dev,
7522                         "add mac addr failed for undefined, code=%u.\n",
7523                         resp_code);
7524                 return -EIO;
7525         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7526                 if (!resp_code) {
7527                         return 0;
7528                 } else if (resp_code == 1) {
7529                         dev_dbg(&hdev->pdev->dev,
7530                                 "remove mac addr failed for miss.\n");
7531                         return -ENOENT;
7532                 }
7533
7534                 dev_err(&hdev->pdev->dev,
7535                         "remove mac addr failed for undefined, code=%u.\n",
7536                         resp_code);
7537                 return -EIO;
7538         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7539                 if (!resp_code) {
7540                         return 0;
7541                 } else if (resp_code == 1) {
7542                         dev_dbg(&hdev->pdev->dev,
7543                                 "lookup mac addr failed for miss.\n");
7544                         return -ENOENT;
7545                 }
7546
7547                 dev_err(&hdev->pdev->dev,
7548                         "lookup mac addr failed for undefined, code=%u.\n",
7549                         resp_code);
7550                 return -EIO;
7551         }
7552
7553         dev_err(&hdev->pdev->dev,
7554                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7555
7556         return -EINVAL;
7557 }
7558
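/* Set or clear the bit for vfid in the entry's function bitmap, which is
 * spread across desc[1] (functions 0-191) and desc[2] (functions 192-255).
 */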
7559 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7560 {
7561 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7562
7563         unsigned int word_num;
7564         unsigned int bit_num;
7565
7566         if (vfid > 255 || vfid < 0)
7567                 return -EIO;
7568
7569         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7570                 word_num = vfid / 32;
7571                 bit_num  = vfid % 32;
7572                 if (clr)
7573                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7574                 else
7575                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7576         } else {
7577                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7578                 bit_num  = vfid % 32;
7579                 if (clr)
7580                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7581                 else
7582                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7583         }
7584
7585         return 0;
7586 }
7587
7588 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7589 {
7590 #define HCLGE_DESC_NUMBER 3
7591 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7592         int i, j;
7593
7594         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7595                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7596                         if (desc[i].data[j])
7597                                 return false;
7598
7599         return true;
7600 }
7601
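/* Pack the 6-byte MAC address into a table entry: bytes 0-3 form
 * mac_addr_hi32 (byte 0 in the lowest bits) and bytes 4-5 form
 * mac_addr_lo16. Multicast entries additionally set the MC entry-type
 * bits.
 */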
7602 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7603                                    const u8 *addr, bool is_mc)
7604 {
7605         const unsigned char *mac_addr = addr;
7606         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7607                        (mac_addr[0]) | (mac_addr[1] << 8);
7608         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7609
7610         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7611         if (is_mc) {
7612                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7613                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7614         }
7615
7616         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7617         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7618 }
7619
7620 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7621                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7622 {
7623         struct hclge_dev *hdev = vport->back;
7624         struct hclge_desc desc;
7625         u8 resp_code;
7626         u16 retval;
7627         int ret;
7628
7629         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7630
7631         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7632
7633         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7634         if (ret) {
7635                 dev_err(&hdev->pdev->dev,
7636                         "del mac addr failed for cmd_send, ret =%d.\n",
7637                         ret);
7638                 return ret;
7639         }
7640         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7641         retval = le16_to_cpu(desc.retval);
7642
7643         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7644                                              HCLGE_MAC_VLAN_REMOVE);
7645 }
7646
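/* Look up an entry in the MAC-VLAN table. Multicast entries carry a
 * per-function bitmap and therefore use three chained descriptors;
 * unicast lookups need only one.
 */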
7647 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7648                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7649                                      struct hclge_desc *desc,
7650                                      bool is_mc)
7651 {
7652         struct hclge_dev *hdev = vport->back;
7653         u8 resp_code;
7654         u16 retval;
7655         int ret;
7656
7657         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7658         if (is_mc) {
7659                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7660                 memcpy(desc[0].data,
7661                        req,
7662                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7663                 hclge_cmd_setup_basic_desc(&desc[1],
7664                                            HCLGE_OPC_MAC_VLAN_ADD,
7665                                            true);
7666                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7667                 hclge_cmd_setup_basic_desc(&desc[2],
7668                                            HCLGE_OPC_MAC_VLAN_ADD,
7669                                            true);
7670                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7671         } else {
7672                 memcpy(desc[0].data,
7673                        req,
7674                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7675                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7676         }
7677         if (ret) {
7678                 dev_err(&hdev->pdev->dev,
7679                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7680                         ret);
7681                 return ret;
7682         }
7683         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7684         retval = le16_to_cpu(desc[0].retval);
7685
7686         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7687                                              HCLGE_MAC_VLAN_LKUP);
7688 }
7689
7690 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7691                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7692                                   struct hclge_desc *mc_desc)
7693 {
7694         struct hclge_dev *hdev = vport->back;
7695         int cfg_status;
7696         u8 resp_code;
7697         u16 retval;
7698         int ret;
7699
7700         if (!mc_desc) {
7701                 struct hclge_desc desc;
7702
7703                 hclge_cmd_setup_basic_desc(&desc,
7704                                            HCLGE_OPC_MAC_VLAN_ADD,
7705                                            false);
7706                 memcpy(desc.data, req,
7707                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7708                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7709                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7710                 retval = le16_to_cpu(desc.retval);
7711
7712                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7713                                                            resp_code,
7714                                                            HCLGE_MAC_VLAN_ADD);
7715         } else {
7716                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7717                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7718                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7719                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7720                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7721                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7722                 memcpy(mc_desc[0].data, req,
7723                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7724                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7725                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7726                 retval = le16_to_cpu(mc_desc[0].retval);
7727
7728                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7729                                                            resp_code,
7730                                                            HCLGE_MAC_VLAN_ADD);
7731         }
7732
7733         if (ret) {
7734                 dev_err(&hdev->pdev->dev,
7735                         "add mac addr failed for cmd_send, ret =%d.\n",
7736                         ret);
7737                 return ret;
7738         }
7739
7740         return cfg_status;
7741 }
7742
7743 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7744                                u16 *allocated_size)
7745 {
7746         struct hclge_umv_spc_alc_cmd *req;
7747         struct hclge_desc desc;
7748         int ret;
7749
7750         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7751         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7752
7753         req->space_size = cpu_to_le32(space_size);
7754
7755         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7756         if (ret) {
7757                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7758                         ret);
7759                 return ret;
7760         }
7761
7762         *allocated_size = le32_to_cpu(desc.data[1]);
7763
7764         return 0;
7765 }
7766
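/* Request the unicast MAC-VLAN (UMV) table space from firmware and split
 * it up: every vport gets a private quota of
 * max_umv_size / (num_alloc_vport + 1) entries, and the leftover entries
 * form the shared pool (share_umv_size).
 */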
7767 static int hclge_init_umv_space(struct hclge_dev *hdev)
7768 {
7769         u16 allocated_size = 0;
7770         int ret;
7771
7772         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7773         if (ret)
7774                 return ret;
7775
7776         if (allocated_size < hdev->wanted_umv_size)
7777                 dev_warn(&hdev->pdev->dev,
7778                          "failed to alloc umv space, want %u, get %u\n",
7779                          hdev->wanted_umv_size, allocated_size);
7780
7781         hdev->max_umv_size = allocated_size;
7782         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7783         hdev->share_umv_size = hdev->priv_umv_size +
7784                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7785
7786         return 0;
7787 }
7788
7789 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7790 {
7791         struct hclge_vport *vport;
7792         int i;
7793
7794         for (i = 0; i < hdev->num_alloc_vport; i++) {
7795                 vport = &hdev->vport[i];
7796                 vport->used_umv_num = 0;
7797         }
7798
7799         mutex_lock(&hdev->vport_lock);
7800         hdev->share_umv_size = hdev->priv_umv_size +
7801                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7802         mutex_unlock(&hdev->vport_lock);
7803 }
7804
7805 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7806 {
7807         struct hclge_dev *hdev = vport->back;
7808         bool is_full;
7809
7810         if (need_lock)
7811                 mutex_lock(&hdev->vport_lock);
7812
7813         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7814                    hdev->share_umv_size == 0);
7815
7816         if (need_lock)
7817                 mutex_unlock(&hdev->vport_lock);
7818
7819         return is_full;
7820 }
7821
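/* Account for a unicast entry being added (is_free == false) or freed
 * (is_free == true). Entries beyond the vport's private quota are charged
 * to, or given back to, the shared pool. Called with vport_lock held.
 */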
7822 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7823 {
7824         struct hclge_dev *hdev = vport->back;
7825
7826         if (is_free) {
7827                 if (vport->used_umv_num > hdev->priv_umv_size)
7828                         hdev->share_umv_size++;
7829
7830                 if (vport->used_umv_num > 0)
7831                         vport->used_umv_num--;
7832         } else {
7833                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7834                     hdev->share_umv_size > 0)
7835                         hdev->share_umv_size--;
7836                 vport->used_umv_num++;
7837         }
7838 }
7839
7840 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7841                                                   const u8 *mac_addr)
7842 {
7843         struct hclge_mac_node *mac_node, *tmp;
7844
7845         list_for_each_entry_safe(mac_node, tmp, list, node)
7846                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7847                         return mac_node;
7848
7849         return NULL;
7850 }
7851
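/* Merge a new request into the current state of a mac list node:
 * TO_ADD on a TO_DEL node revives it as ACTIVE, TO_DEL on a TO_ADD node
 * drops the node entirely, and ACTIVE (reported from tmp_add_list)
 * promotes a TO_ADD node to ACTIVE.
 */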
7852 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7853                                   enum HCLGE_MAC_NODE_STATE state)
7854 {
7855         switch (state) {
7856         /* from set_rx_mode or tmp_add_list */
7857         case HCLGE_MAC_TO_ADD:
7858                 if (mac_node->state == HCLGE_MAC_TO_DEL)
7859                         mac_node->state = HCLGE_MAC_ACTIVE;
7860                 break;
7861         /* only from set_rx_mode */
7862         case HCLGE_MAC_TO_DEL:
7863                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
7864                         list_del(&mac_node->node);
7865                         kfree(mac_node);
7866                 } else {
7867                         mac_node->state = HCLGE_MAC_TO_DEL;
7868                 }
7869                 break;
7870         /* only from tmp_add_list; here the mac_node->state won't be
7871          * ACTIVE.
7872          */
7873         case HCLGE_MAC_ACTIVE:
7874                 if (mac_node->state == HCLGE_MAC_TO_ADD)
7875                         mac_node->state = HCLGE_MAC_ACTIVE;
7876
7877                 break;
7878         }
7879 }
7880
7881 int hclge_update_mac_list(struct hclge_vport *vport,
7882                           enum HCLGE_MAC_NODE_STATE state,
7883                           enum HCLGE_MAC_ADDR_TYPE mac_type,
7884                           const unsigned char *addr)
7885 {
7886         struct hclge_dev *hdev = vport->back;
7887         struct hclge_mac_node *mac_node;
7888         struct list_head *list;
7889
7890         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7891                 &vport->uc_mac_list : &vport->mc_mac_list;
7892
7893         spin_lock_bh(&vport->mac_list_lock);
7894
7895         /* if the mac addr is already in the mac list, no need to add a new
7896          * one into it, just check the mac addr state and convert it to a
7897          * new state, remove it, or do nothing.
7898          */
7899         mac_node = hclge_find_mac_node(list, addr);
7900         if (mac_node) {
7901                 hclge_update_mac_node(mac_node, state);
7902                 spin_unlock_bh(&vport->mac_list_lock);
7903                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7904                 return 0;
7905         }
7906
7907         /* if this address was never added, there is no need to delete it */
7908         if (state == HCLGE_MAC_TO_DEL) {
7909                 spin_unlock_bh(&vport->mac_list_lock);
7910                 dev_err(&hdev->pdev->dev,
7911                         "failed to delete address %pM from mac list\n",
7912                         addr);
7913                 return -ENOENT;
7914         }
7915
7916         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7917         if (!mac_node) {
7918                 spin_unlock_bh(&vport->mac_list_lock);
7919                 return -ENOMEM;
7920         }
7921
7922         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7923
7924         mac_node->state = state;
7925         ether_addr_copy(mac_node->mac_addr, addr);
7926         list_add_tail(&mac_node->node, list);
7927
7928         spin_unlock_bh(&vport->mac_list_lock);
7929
7930         return 0;
7931 }
7932
7933 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7934                              const unsigned char *addr)
7935 {
7936         struct hclge_vport *vport = hclge_get_vport(handle);
7937
7938         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7939                                      addr);
7940 }
7941
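/* Add a unicast entry for the vport: look it up first, and only add it
 * when it is absent and there is private or shared UMV space left. An
 * existing entry is treated as success; a full table returns -ENOSPC.
 */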
7942 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7943                              const unsigned char *addr)
7944 {
7945         struct hclge_dev *hdev = vport->back;
7946         struct hclge_mac_vlan_tbl_entry_cmd req;
7947         struct hclge_desc desc;
7948         u16 egress_port = 0;
7949         int ret;
7950
7951         /* mac addr check */
7952         if (is_zero_ether_addr(addr) ||
7953             is_broadcast_ether_addr(addr) ||
7954             is_multicast_ether_addr(addr)) {
7955                 dev_err(&hdev->pdev->dev,
7956                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7957                          addr, is_zero_ether_addr(addr),
7958                          is_broadcast_ether_addr(addr),
7959                          is_multicast_ether_addr(addr));
7960                 return -EINVAL;
7961         }
7962
7963         memset(&req, 0, sizeof(req));
7964
7965         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7966                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7967
7968         req.egress_port = cpu_to_le16(egress_port);
7969
7970         hclge_prepare_mac_addr(&req, addr, false);
7971
7972         /* Lookup the mac address in the mac_vlan table, and add
7973          * it if the entry does not exist. Duplicate unicast entries
7974          * are not allowed in the mac vlan table.
7975          */
7976         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7977         if (ret == -ENOENT) {
7978                 mutex_lock(&hdev->vport_lock);
7979                 if (!hclge_is_umv_space_full(vport, false)) {
7980                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7981                         if (!ret)
7982                                 hclge_update_umv_space(vport, false);
7983                         mutex_unlock(&hdev->vport_lock);
7984                         return ret;
7985                 }
7986                 mutex_unlock(&hdev->vport_lock);
7987
7988                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7989                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7990                                 hdev->priv_umv_size);
7991
7992                 return -ENOSPC;
7993         }
7994
7995         /* check if we just hit a duplicate */
7996         if (!ret) {
7997                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7998                          vport->vport_id, addr);
7999                 return 0;
8000         }
8001
8002         dev_err(&hdev->pdev->dev,
8003                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8004                 addr);
8005
8006         return ret;
8007 }
8008
8009 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8010                             const unsigned char *addr)
8011 {
8012         struct hclge_vport *vport = hclge_get_vport(handle);
8013
8014         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8015                                      addr);
8016 }
8017
8018 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8019                             const unsigned char *addr)
8020 {
8021         struct hclge_dev *hdev = vport->back;
8022         struct hclge_mac_vlan_tbl_entry_cmd req;
8023         int ret;
8024
8025         /* mac addr check */
8026         if (is_zero_ether_addr(addr) ||
8027             is_broadcast_ether_addr(addr) ||
8028             is_multicast_ether_addr(addr)) {
8029                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8030                         addr);
8031                 return -EINVAL;
8032         }
8033
8034         memset(&req, 0, sizeof(req));
8035         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8036         hclge_prepare_mac_addr(&req, addr, false);
8037         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8038         if (!ret) {
8039                 mutex_lock(&hdev->vport_lock);
8040                 hclge_update_umv_space(vport, true);
8041                 mutex_unlock(&hdev->vport_lock);
8042         } else if (ret == -ENOENT) {
8043                 ret = 0;
8044         }
8045
8046         return ret;
8047 }
8048
8049 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8050                              const unsigned char *addr)
8051 {
8052         struct hclge_vport *vport = hclge_get_vport(handle);
8053
8054         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8055                                      addr);
8056 }
8057
8058 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8059                              const unsigned char *addr)
8060 {
8061         struct hclge_dev *hdev = vport->back;
8062         struct hclge_mac_vlan_tbl_entry_cmd req;
8063         struct hclge_desc desc[3];
8064         int status;
8065
8066         /* mac addr check */
8067         if (!is_multicast_ether_addr(addr)) {
8068                 dev_err(&hdev->pdev->dev,
8069                         "Add mc mac err! invalid mac:%pM.\n",
8070                          addr);
8071                 return -EINVAL;
8072         }
8073         memset(&req, 0, sizeof(req));
8074         hclge_prepare_mac_addr(&req, addr, true);
8075         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8076         if (status) {
8077                 /* This mac addr does not exist, add a new entry for it */
8078                 memset(desc[0].data, 0, sizeof(desc[0].data));
8079                 memset(desc[1].data, 0, sizeof(desc[0].data));
8080                 memset(desc[2].data, 0, sizeof(desc[0].data));
8081         }
8082         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8083         if (status)
8084                 return status;
8085         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8086
8087         /* if the table already overflowed, do not print each time */
8088         if (status == -ENOSPC &&
8089             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8090                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8091
8092         return status;
8093 }
8094
8095 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8096                             const unsigned char *addr)
8097 {
8098         struct hclge_vport *vport = hclge_get_vport(handle);
8099
8100         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8101                                      addr);
8102 }
8103
8104 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8105                             const unsigned char *addr)
8106 {
8107         struct hclge_dev *hdev = vport->back;
8108         struct hclge_mac_vlan_tbl_entry_cmd req;
8109         enum hclge_cmd_status status;
8110         struct hclge_desc desc[3];
8111
8112         /* mac addr check */
8113         if (!is_multicast_ether_addr(addr)) {
8114                 dev_dbg(&hdev->pdev->dev,
8115                         "Remove mc mac err! invalid mac:%pM.\n",
8116                          addr);
8117                 return -EINVAL;
8118         }
8119
8120         memset(&req, 0, sizeof(req));
8121         hclge_prepare_mac_addr(&req, addr, true);
8122         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8123         if (!status) {
8124                 /* This mac addr exists, remove this handle's VFID from it */
8125                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8126                 if (status)
8127                         return status;
8128
8129                 if (hclge_is_all_function_id_zero(desc))
8130                         /* All the vfids are zero, so delete this entry */
8131                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8132                 else
8133                         /* Not all the vfids are zero, just update the vfid bitmap */
8134                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8135
8136         } else if (status == -ENOENT) {
8137                 status = 0;
8138         }
8139
8140         return status;
8141 }
8142
8143 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8144                                       struct list_head *list,
8145                                       int (*sync)(struct hclge_vport *,
8146                                                   const unsigned char *))
8147 {
8148         struct hclge_mac_node *mac_node, *tmp;
8149         int ret;
8150
8151         list_for_each_entry_safe(mac_node, tmp, list, node) {
8152                 ret = sync(vport, mac_node->mac_addr);
8153                 if (!ret) {
8154                         mac_node->state = HCLGE_MAC_ACTIVE;
8155                 } else {
8156                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8157                                 &vport->state);
8158                         break;
8159                 }
8160         }
8161 }
8162
8163 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8164                                         struct list_head *list,
8165                                         int (*unsync)(struct hclge_vport *,
8166                                                       const unsigned char *))
8167 {
8168         struct hclge_mac_node *mac_node, *tmp;
8169         int ret;
8170
8171         list_for_each_entry_safe(mac_node, tmp, list, node) {
8172                 ret = unsync(vport, mac_node->mac_addr);
8173                 if (!ret || ret == -ENOENT) {
8174                         list_del(&mac_node->node);
8175                         kfree(mac_node);
8176                 } else {
8177                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8178                                 &vport->state);
8179                         break;
8180                 }
8181         }
8182 }
8183
8184 static bool hclge_sync_from_add_list(struct list_head *add_list,
8185                                      struct list_head *mac_list)
8186 {
8187         struct hclge_mac_node *mac_node, *tmp, *new_node;
8188         bool all_added = true;
8189
8190         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8191                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8192                         all_added = false;
8193
8194                 /* if the mac address from tmp_add_list is not in the
8195                  * uc/mc_mac_list, a TO_DEL request was received while the
8196                  * address was being added to the mac table. If the mac_node
8197                  * state is ACTIVE, change it to TO_DEL so it is removed next
8198                  * time; otherwise it must be TO_ADD, meaning the address has
8199                  * not been added to the mac table yet, so just remove the
8200                  * mac node.
8201                  */
8202                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8203                 if (new_node) {
8204                         hclge_update_mac_node(new_node, mac_node->state);
8205                         list_del(&mac_node->node);
8206                         kfree(mac_node);
8207                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8208                         mac_node->state = HCLGE_MAC_TO_DEL;
8209                         list_del(&mac_node->node);
8210                         list_add_tail(&mac_node->node, mac_list);
8211                 } else {
8212                         list_del(&mac_node->node);
8213                         kfree(mac_node);
8214                 }
8215         }
8216
8217         return all_added;
8218 }
8219
8220 static void hclge_sync_from_del_list(struct list_head *del_list,
8221                                      struct list_head *mac_list)
8222 {
8223         struct hclge_mac_node *mac_node, *tmp, *new_node;
8224
8225         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8226                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8227                 if (new_node) {
8228                         /* If the mac addr exists in the mac list, a new
8229                          * TO_ADD request was received while the address was
8230                          * being configured. The mac node state is TO_ADD and
8231                          * the address is still in the hardware (because the
8232                          * delete failed), so just change the mac node state
8233                          * to ACTIVE.
8234                          */
8235                         new_node->state = HCLGE_MAC_ACTIVE;
8236                         list_del(&mac_node->node);
8237                         kfree(mac_node);
8238                 } else {
8239                         list_del(&mac_node->node);
8240                         list_add_tail(&mac_node->node, mac_list);
8241                 }
8242         }
8243 }
8244
8245 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8246                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8247                                         bool is_all_added)
8248 {
8249         if (mac_type == HCLGE_MAC_ADDR_UC) {
8250                 if (is_all_added)
8251                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8252                 else
8253                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8254         } else {
8255                 if (is_all_added)
8256                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8257                 else
8258                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8259         }
8260 }
8261
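/* Sync a vport's unicast or multicast mac list to hardware: snapshot the
 * pending TO_ADD/TO_DEL nodes under mac_list_lock, apply them to the
 * MAC-VLAN table outside the lock, merge the results back into the list,
 * and update the promiscuous overflow flags accordingly.
 */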
8262 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8263                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8264 {
8265         struct hclge_mac_node *mac_node, *tmp, *new_node;
8266         struct list_head tmp_add_list, tmp_del_list;
8267         struct list_head *list;
8268         bool all_added;
8269
8270         INIT_LIST_HEAD(&tmp_add_list);
8271         INIT_LIST_HEAD(&tmp_del_list);
8272
8273         /* move the mac addrs to tmp_add_list and tmp_del_list, so they
8274          * can be added/deleted outside the spin lock
8275          */
8276         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8277                 &vport->uc_mac_list : &vport->mc_mac_list;
8278
8279         spin_lock_bh(&vport->mac_list_lock);
8280
8281         list_for_each_entry_safe(mac_node, tmp, list, node) {
8282                 switch (mac_node->state) {
8283                 case HCLGE_MAC_TO_DEL:
8284                         list_del(&mac_node->node);
8285                         list_add_tail(&mac_node->node, &tmp_del_list);
8286                         break;
8287                 case HCLGE_MAC_TO_ADD:
8288                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8289                         if (!new_node)
8290                                 goto stop_traverse;
8291                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8292                         new_node->state = mac_node->state;
8293                         list_add_tail(&new_node->node, &tmp_add_list);
8294                         break;
8295                 default:
8296                         break;
8297                 }
8298         }
8299
8300 stop_traverse:
8301         spin_unlock_bh(&vport->mac_list_lock);
8302
8303         /* delete first, in order to get max mac table space for adding */
8304         if (mac_type == HCLGE_MAC_ADDR_UC) {
8305                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8306                                             hclge_rm_uc_addr_common);
8307                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8308                                           hclge_add_uc_addr_common);
8309         } else {
8310                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8311                                             hclge_rm_mc_addr_common);
8312                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8313                                           hclge_add_mc_addr_common);
8314         }
8315
8316         /* if adding/deleting some mac addresses failed, move them back to
8317          * the mac_list and retry next time.
8318          */
8319         spin_lock_bh(&vport->mac_list_lock);
8320
8321         hclge_sync_from_del_list(&tmp_del_list, list);
8322         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8323
8324         spin_unlock_bh(&vport->mac_list_lock);
8325
8326         hclge_update_overflow_flags(vport, mac_type, all_added);
8327 }
8328
8329 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8330 {
8331         struct hclge_dev *hdev = vport->back;
8332
8333         if (test_bit(vport->vport_id, hdev->vport_config_block))
8334                 return false;
8335
8336         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8337                 return true;
8338
8339         return false;
8340 }
8341
8342 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8343 {
8344         int i;
8345
8346         for (i = 0; i < hdev->num_alloc_vport; i++) {
8347                 struct hclge_vport *vport = &hdev->vport[i];
8348
8349                 if (!hclge_need_sync_mac_table(vport))
8350                         continue;
8351
8352                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8353                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8354         }
8355 }
8356
8357 static void hclge_build_del_list(struct list_head *list,
8358                                  bool is_del_list,
8359                                  struct list_head *tmp_del_list)
8360 {
8361         struct hclge_mac_node *mac_cfg, *tmp;
8362
8363         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8364                 switch (mac_cfg->state) {
8365                 case HCLGE_MAC_TO_DEL:
8366                 case HCLGE_MAC_ACTIVE:
8367                         list_del(&mac_cfg->node);
8368                         list_add_tail(&mac_cfg->node, tmp_del_list);
8369                         break;
8370                 case HCLGE_MAC_TO_ADD:
8371                         if (is_del_list) {
8372                                 list_del(&mac_cfg->node);
8373                                 kfree(mac_cfg);
8374                         }
8375                         break;
8376                 }
8377         }
8378 }
8379
8380 static void hclge_unsync_del_list(struct hclge_vport *vport,
8381                                   int (*unsync)(struct hclge_vport *vport,
8382                                                 const unsigned char *addr),
8383                                   bool is_del_list,
8384                                   struct list_head *tmp_del_list)
8385 {
8386         struct hclge_mac_node *mac_cfg, *tmp;
8387         int ret;
8388
8389         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8390                 ret = unsync(vport, mac_cfg->mac_addr);
8391                 if (!ret || ret == -ENOENT) {
8392                         /* clear all mac addrs from hardware, but keep these
8393                          * mac addrs in the mac list, and restore them after
8394                          * the vf reset is finished.
8395                          */
8396                         if (!is_del_list &&
8397                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8398                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8399                         } else {
8400                                 list_del(&mac_cfg->node);
8401                                 kfree(mac_cfg);
8402                         }
8403                 } else if (is_del_list) {
8404                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8405                 }
8406         }
8407 }
8408
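/* remove all uc or mc mac addresses of a vport from hardware. When
 * is_del_list is false, the vport_config_block bit is set and ACTIVE entries
 * are kept in the software list as TO_ADD so they can be restored after the
 * vf reset is finished; when is_del_list is true, the entries are freed.
 */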
8409 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8410                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8411 {
8412         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8413         struct hclge_dev *hdev = vport->back;
8414         struct list_head tmp_del_list, *list;
8415
8416         if (mac_type == HCLGE_MAC_ADDR_UC) {
8417                 list = &vport->uc_mac_list;
8418                 unsync = hclge_rm_uc_addr_common;
8419         } else {
8420                 list = &vport->mc_mac_list;
8421                 unsync = hclge_rm_mc_addr_common;
8422         }
8423
8424         INIT_LIST_HEAD(&tmp_del_list);
8425
8426         if (!is_del_list)
8427                 set_bit(vport->vport_id, hdev->vport_config_block);
8428
8429         spin_lock_bh(&vport->mac_list_lock);
8430
8431         hclge_build_del_list(list, is_del_list, &tmp_del_list);
8432
8433         spin_unlock_bh(&vport->mac_list_lock);
8434
8435         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8436
8437         spin_lock_bh(&vport->mac_list_lock);
8438
8439         hclge_sync_from_del_list(&tmp_del_list, list);
8440
8441         spin_unlock_bh(&vport->mac_list_lock);
8442 }
8443
8444 /* remove all mac addresses when uninitializing */
8445 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8446                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8447 {
8448         struct hclge_mac_node *mac_node, *tmp;
8449         struct hclge_dev *hdev = vport->back;
8450         struct list_head tmp_del_list, *list;
8451
8452         INIT_LIST_HEAD(&tmp_del_list);
8453
8454         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8455                 &vport->uc_mac_list : &vport->mc_mac_list;
8456
8457         spin_lock_bh(&vport->mac_list_lock);
8458
8459         list_for_each_entry_safe(mac_node, tmp, list, node) {
8460                 switch (mac_node->state) {
8461                 case HCLGE_MAC_TO_DEL:
8462                 case HCLGE_MAC_ACTIVE:
8463                         list_del(&mac_node->node);
8464                         list_add_tail(&mac_node->node, &tmp_del_list);
8465                         break;
8466                 case HCLGE_MAC_TO_ADD:
8467                         list_del(&mac_node->node);
8468                         kfree(mac_node);
8469                         break;
8470                 }
8471         }
8472
8473         spin_unlock_bh(&vport->mac_list_lock);
8474
8475         if (mac_type == HCLGE_MAC_ADDR_UC)
8476                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8477                                             hclge_rm_uc_addr_common);
8478         else
8479                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8480                                             hclge_rm_mc_addr_common);
8481
8482         if (!list_empty(&tmp_del_list))
8483                 dev_warn(&hdev->pdev->dev,
8484                          "failed to completely uninit %s mac list for vport %u\n",
8485                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8486                          vport->vport_id);
8487
8488         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8489                 list_del(&mac_node->node);
8490                 kfree(mac_node);
8491         }
8492 }
8493
8494 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8495 {
8496         struct hclge_vport *vport;
8497         int i;
8498
8499         for (i = 0; i < hdev->num_alloc_vport; i++) {
8500                 vport = &hdev->vport[i];
8501                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8502                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8503         }
8504 }
8505
8506 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8507                                               u16 cmdq_resp, u8 resp_code)
8508 {
8509 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
8510 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
8511 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
8512 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
8513
8514         int return_status;
8515
8516         if (cmdq_resp) {
8517                 dev_err(&hdev->pdev->dev,
8518                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8519                         cmdq_resp);
8520                 return -EIO;
8521         }
8522
8523         switch (resp_code) {
8524         case HCLGE_ETHERTYPE_SUCCESS_ADD:
8525         case HCLGE_ETHERTYPE_ALREADY_ADD:
8526                 return_status = 0;
8527                 break;
8528         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8529                 dev_err(&hdev->pdev->dev,
8530                         "add mac ethertype failed for manager table overflow.\n");
8531                 return_status = -EIO;
8532                 break;
8533         case HCLGE_ETHERTYPE_KEY_CONFLICT:
8534                 dev_err(&hdev->pdev->dev,
8535                         "add mac ethertype failed for key conflict.\n");
8536                 return_status = -EIO;
8537                 break;
8538         default:
8539                 dev_err(&hdev->pdev->dev,
8540                         "add mac ethertype failed for undefined, code=%u.\n",
8541                         resp_code);
8542                 return_status = -EIO;
8543         }
8544
8545         return return_status;
8546 }
8547
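/* check whether the given mac address is already in use: either present in
 * the hardware mac_vlan table for this vport, or configured for another vf.
 */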
8548 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8549                                      u8 *mac_addr)
8550 {
8551         struct hclge_mac_vlan_tbl_entry_cmd req;
8552         struct hclge_dev *hdev = vport->back;
8553         struct hclge_desc desc;
8554         u16 egress_port = 0;
8555         int i;
8556
8557         if (is_zero_ether_addr(mac_addr))
8558                 return false;
8559
8560         memset(&req, 0, sizeof(req));
8561         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8562                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8563         req.egress_port = cpu_to_le16(egress_port);
8564         hclge_prepare_mac_addr(&req, mac_addr, false);
8565
8566         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8567                 return true;
8568
8569         vf_idx += HCLGE_VF_VPORT_START_NUM;
8570         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8571                 if (i != vf_idx &&
8572                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8573                         return true;
8574
8575         return false;
8576 }
8577
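/* set the mac address administratively assigned to a vf: the address is
 * recorded after checking that it is not already in use, and if the vf is
 * alive a reset is asserted to it so that it reinitializes with the new
 * address.
 */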
8578 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8579                             u8 *mac_addr)
8580 {
8581         struct hclge_vport *vport = hclge_get_vport(handle);
8582         struct hclge_dev *hdev = vport->back;
8583
8584         vport = hclge_get_vf_vport(hdev, vf);
8585         if (!vport)
8586                 return -EINVAL;
8587
8588         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8589                 dev_info(&hdev->pdev->dev,
8590                          "Specified MAC(=%pM) is same as before, no change committed!\n",
8591                          mac_addr);
8592                 return 0;
8593         }
8594
8595         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8596                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8597                         mac_addr);
8598                 return -EEXIST;
8599         }
8600
8601         ether_addr_copy(vport->vf_info.mac, mac_addr);
8602
8603         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8604                 dev_info(&hdev->pdev->dev,
8605                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8606                          vf, mac_addr);
8607                 return hclge_inform_reset_assert_to_vf(vport);
8608         }
8609
8610         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8611                  vf, mac_addr);
8612         return 0;
8613 }
8614
8615 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8616                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8617 {
8618         struct hclge_desc desc;
8619         u8 resp_code;
8620         u16 retval;
8621         int ret;
8622
8623         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8624         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8625
8626         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8627         if (ret) {
8628                 dev_err(&hdev->pdev->dev,
8629                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8630                         ret);
8631                 return ret;
8632         }
8633
8634         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8635         retval = le16_to_cpu(desc.retval);
8636
8637         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8638 }
8639
8640 static int init_mgr_tbl(struct hclge_dev *hdev)
8641 {
8642         int ret;
8643         int i;
8644
8645         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8646                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8647                 if (ret) {
8648                         dev_err(&hdev->pdev->dev,
8649                                 "add mac ethertype failed, ret =%d.\n",
8650                                 ret);
8651                         return ret;
8652                 }
8653         }
8654
8655         return 0;
8656 }
8657
8658 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8659 {
8660         struct hclge_vport *vport = hclge_get_vport(handle);
8661         struct hclge_dev *hdev = vport->back;
8662
8663         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8664 }
8665
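/* update the unicast mac list when the device address changes: the new
 * address is added (or revived) at the head of the list, the old address, if
 * different, is marked for deletion or freed if it was never written to
 * hardware, and the MAC_TBL_CHANGE state bit is set so the change is picked
 * up by the mac table sync.
 */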
8666 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8667                                        const u8 *old_addr, const u8 *new_addr)
8668 {
8669         struct list_head *list = &vport->uc_mac_list;
8670         struct hclge_mac_node *old_node, *new_node;
8671
8672         new_node = hclge_find_mac_node(list, new_addr);
8673         if (!new_node) {
8674                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8675                 if (!new_node)
8676                         return -ENOMEM;
8677
8678                 new_node->state = HCLGE_MAC_TO_ADD;
8679                 ether_addr_copy(new_node->mac_addr, new_addr);
8680                 list_add(&new_node->node, list);
8681         } else {
8682                 if (new_node->state == HCLGE_MAC_TO_DEL)
8683                         new_node->state = HCLGE_MAC_ACTIVE;
8684
8685                 /* make sure the new addr is at the list head, so that the
8686                  * dev addr is not left out of the mac table due to the umv
8687                  * space limitation after a global/imp reset, which clears
8688                  * the mac table in hardware.
8689                  */
8690                 list_move(&new_node->node, list);
8691         }
8692
8693         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8694                 old_node = hclge_find_mac_node(list, old_addr);
8695                 if (old_node) {
8696                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8697                                 list_del(&old_node->node);
8698                                 kfree(old_node);
8699                         } else {
8700                                 old_node->state = HCLGE_MAC_TO_DEL;
8701                         }
8702                 }
8703         }
8704
8705         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8706
8707         return 0;
8708 }
8709
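/* change the mac address of the device: validate the new address, program it
 * as the mac pause address, then update the unicast mac list under
 * mac_list_lock and schedule the service task to write it to the mac table.
 */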
8710 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8711                               bool is_first)
8712 {
8713         const unsigned char *new_addr = (const unsigned char *)p;
8714         struct hclge_vport *vport = hclge_get_vport(handle);
8715         struct hclge_dev *hdev = vport->back;
8716         unsigned char *old_addr = NULL;
8717         int ret;
8718
8719         /* mac addr check */
8720         if (is_zero_ether_addr(new_addr) ||
8721             is_broadcast_ether_addr(new_addr) ||
8722             is_multicast_ether_addr(new_addr)) {
8723                 dev_err(&hdev->pdev->dev,
8724                         "change uc mac err! invalid mac: %pM.\n",
8725                          new_addr);
8726                 return -EINVAL;
8727         }
8728
8729         ret = hclge_pause_addr_cfg(hdev, new_addr);
8730         if (ret) {
8731                 dev_err(&hdev->pdev->dev,
8732                         "failed to configure mac pause address, ret = %d\n",
8733                         ret);
8734                 return ret;
8735         }
8736
8737         if (!is_first)
8738                 old_addr = hdev->hw.mac.mac_addr;
8739
8740         spin_lock_bh(&vport->mac_list_lock);
8741         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8742         if (ret) {
8743                 dev_err(&hdev->pdev->dev,
8744                         "failed to change the mac addr:%pM, ret = %d\n",
8745                         new_addr, ret);
8746                 spin_unlock_bh(&vport->mac_list_lock);
8747
8748                 if (!is_first)
8749                         hclge_pause_addr_cfg(hdev, old_addr);
8750
8751                 return ret;
8752         }
8753         /* the dev addr must be updated under the spin lock, to prevent it
8754          * from being removed by the set_rx_mode path.
8755          */
8756         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8757         spin_unlock_bh(&vport->mac_list_lock);
8758
8759         hclge_task_schedule(hdev, 0);
8760
8761         return 0;
8762 }
8763
8764 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8765                           int cmd)
8766 {
8767         struct hclge_vport *vport = hclge_get_vport(handle);
8768         struct hclge_dev *hdev = vport->back;
8769
8770         if (!hdev->hw.mac.phydev)
8771                 return -EOPNOTSUPP;
8772
8773         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8774 }
8775
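/* enable or disable the given vlan filter entries (fe_type) for a vlan_type
 * and vf_id: the current filter control is read first, the fe bits are
 * modified, and the result is written back.
 */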
8776 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8777                                       u8 fe_type, bool filter_en, u8 vf_id)
8778 {
8779         struct hclge_vlan_filter_ctrl_cmd *req;
8780         struct hclge_desc desc;
8781         int ret;
8782
8783         /* read current vlan filter parameter */
8784         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8785         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8786         req->vlan_type = vlan_type;
8787         req->vf_id = vf_id;
8788
8789         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8790         if (ret) {
8791                 dev_err(&hdev->pdev->dev,
8792                         "failed to get vlan filter config, ret = %d.\n", ret);
8793                 return ret;
8794         }
8795
8796         /* modify and write new config parameter */
8797         hclge_cmd_reuse_desc(&desc, false);
8798         req->vlan_fe = filter_en ?
8799                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8800
8801         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8802         if (ret)
8803                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8804                         ret);
8805
8806         return ret;
8807 }
8808
8809 #define HCLGE_FILTER_TYPE_VF            0
8810 #define HCLGE_FILTER_TYPE_PORT          1
8811 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8812 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8813 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8814 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8815 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8816 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8817                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8818 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8819                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
8820
8821 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8822 {
8823         struct hclge_vport *vport = hclge_get_vport(handle);
8824         struct hclge_dev *hdev = vport->back;
8825
8826         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8827                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8828                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
8829                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8830                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
8831         } else {
8832                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8833                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8834                                            0);
8835         }
8836         if (enable)
8837                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8838         else
8839                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8840 }
8841
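/* build and send the two-descriptor command that adds (is_kill false) or
 * removes (is_kill true) a vlan id for the vf selected by its bit in the vf
 * bitmap spread across both descriptors.
 */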
8842 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
8843                                         bool is_kill, u16 vlan,
8844                                         struct hclge_desc *desc)
8845 {
8846         struct hclge_vlan_filter_vf_cfg_cmd *req0;
8847         struct hclge_vlan_filter_vf_cfg_cmd *req1;
8848         u8 vf_byte_val;
8849         u8 vf_byte_off;
8850         int ret;
8851
8852         hclge_cmd_setup_basic_desc(&desc[0],
8853                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8854         hclge_cmd_setup_basic_desc(&desc[1],
8855                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8856
8857         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8858
8859         vf_byte_off = vfid / 8;
8860         vf_byte_val = 1 << (vfid % 8);
8861
8862         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8863         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8864
8865         req0->vlan_id  = cpu_to_le16(vlan);
8866         req0->vlan_cfg = is_kill;
8867
8868         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8869                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8870         else
8871                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8872
8873         ret = hclge_cmd_send(&hdev->hw, desc, 2);
8874         if (ret) {
8875                 dev_err(&hdev->pdev->dev,
8876                         "Send vf vlan command fail, ret =%d.\n",
8877                         ret);
8878                 return ret;
8879         }
8880
8881         return 0;
8882 }
8883
8884 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
8885                                           bool is_kill, struct hclge_desc *desc)
8886 {
8887         struct hclge_vlan_filter_vf_cfg_cmd *req;
8888
8889         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8890
8891         if (!is_kill) {
8892 #define HCLGE_VF_VLAN_NO_ENTRY  2
8893                 if (!req->resp_code || req->resp_code == 1)
8894                         return 0;
8895
8896                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8897                         set_bit(vfid, hdev->vf_vlan_full);
8898                         dev_warn(&hdev->pdev->dev,
8899                                  "vf vlan table is full, vf vlan filter is disabled\n");
8900                         return 0;
8901                 }
8902
8903                 dev_err(&hdev->pdev->dev,
8904                         "Add vf vlan filter fail, ret =%u.\n",
8905                         req->resp_code);
8906         } else {
8907 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
8908                 if (!req->resp_code)
8909                         return 0;
8910
8911                 /* vf vlan filter is disabled when the vf vlan table is full,
8912                  * so new vlan ids will not be added into the vf vlan table.
8913                  * Just return 0 without warning, to avoid massive verbose
8914                  * logs when unloading.
8915                  */
8916                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8917                         return 0;
8918
8919                 dev_err(&hdev->pdev->dev,
8920                         "Kill vf vlan filter fail, ret =%u.\n",
8921                         req->resp_code);
8922         }
8923
8924         return -EIO;
8925 }
8926
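/* add or remove a vlan id in the vf vlan filter of the given vfid, skipping
 * the add when the vf vlan table is already full (unless spoof check makes
 * that an error), and checking the firmware response afterwards.
 */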
8927 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8928                                     bool is_kill, u16 vlan,
8929                                     __be16 proto)
8930 {
8931         struct hclge_vport *vport = &hdev->vport[vfid];
8932         struct hclge_desc desc[2];
8933         int ret;
8934
8935         /* if the vf vlan table is full, firmware will close the vf vlan filter,
8936          * so it is neither possible nor necessary to add a new vlan id to it.
8937          * If spoof check is enabled and the vf vlan table is full, a new vlan
8938          * should not be added, because tx packets with that vlan id would be dropped.
8939          */
8940         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8941                 if (vport->vf_info.spoofchk && vlan) {
8942                         dev_err(&hdev->pdev->dev,
8943                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
8944                         return -EPERM;
8945                 }
8946                 return 0;
8947         }
8948
8949         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
8950         if (ret)
8951                 return ret;
8952
8953         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
8954 }
8955
8956 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8957                                       u16 vlan_id, bool is_kill)
8958 {
8959         struct hclge_vlan_filter_pf_cfg_cmd *req;
8960         struct hclge_desc desc;
8961         u8 vlan_offset_byte_val;
8962         u8 vlan_offset_byte;
8963         u8 vlan_offset_160;
8964         int ret;
8965
8966         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8967
8968         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8969         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8970                            HCLGE_VLAN_BYTE_SIZE;
8971         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8972
8973         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8974         req->vlan_offset = vlan_offset_160;
8975         req->vlan_cfg = is_kill;
8976         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8977
8978         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8979         if (ret)
8980                 dev_err(&hdev->pdev->dev,
8981                         "port vlan command, send fail, ret =%d.\n", ret);
8982         return ret;
8983 }
8984
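/* program a vlan id for a vport in hardware: update the vf vlan filter and
 * the per-vlan vport bitmap, and touch the port vlan filter only when the
 * first vport joins or the last vport leaves the vlan.
 */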
8985 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8986                                     u16 vport_id, u16 vlan_id,
8987                                     bool is_kill)
8988 {
8989         u16 vport_idx, vport_num = 0;
8990         int ret;
8991
8992         if (is_kill && !vlan_id)
8993                 return 0;
8994
8995         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8996                                        proto);
8997         if (ret) {
8998                 dev_err(&hdev->pdev->dev,
8999                         "Set %u vport vlan filter config fail, ret =%d.\n",
9000                         vport_id, ret);
9001                 return ret;
9002         }
9003
9004         /* vlan 0 may be added twice when 8021q module is enabled */
9005         if (!is_kill && !vlan_id &&
9006             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9007                 return 0;
9008
9009         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9010                 dev_err(&hdev->pdev->dev,
9011                         "Add port vlan failed, vport %u is already in vlan %u\n",
9012                         vport_id, vlan_id);
9013                 return -EINVAL;
9014         }
9015
9016         if (is_kill &&
9017             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9018                 dev_err(&hdev->pdev->dev,
9019                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9020                         vport_id, vlan_id);
9021                 return -EINVAL;
9022         }
9023
9024         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9025                 vport_num++;
9026
9027         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9028                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9029                                                  is_kill);
9030
9031         return ret;
9032 }
9033
9034 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9035 {
9036         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9037         struct hclge_vport_vtag_tx_cfg_cmd *req;
9038         struct hclge_dev *hdev = vport->back;
9039         struct hclge_desc desc;
9040         u16 bmap_index;
9041         int status;
9042
9043         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9044
9045         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9046         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9047         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9048         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9049                       vcfg->accept_tag1 ? 1 : 0);
9050         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9051                       vcfg->accept_untag1 ? 1 : 0);
9052         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9053                       vcfg->accept_tag2 ? 1 : 0);
9054         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9055                       vcfg->accept_untag2 ? 1 : 0);
9056         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9057                       vcfg->insert_tag1_en ? 1 : 0);
9058         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9059                       vcfg->insert_tag2_en ? 1 : 0);
9060         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9061                       vcfg->tag_shift_mode_en ? 1 : 0);
9062         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9063
9064         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9065         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9066                         HCLGE_VF_NUM_PER_BYTE;
9067         req->vf_bitmap[bmap_index] =
9068                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9069
9070         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9071         if (status)
9072                 dev_err(&hdev->pdev->dev,
9073                         "Send port txvlan cfg command fail, ret =%d\n",
9074                         status);
9075
9076         return status;
9077 }
9078
9079 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9080 {
9081         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9082         struct hclge_vport_vtag_rx_cfg_cmd *req;
9083         struct hclge_dev *hdev = vport->back;
9084         struct hclge_desc desc;
9085         u16 bmap_index;
9086         int status;
9087
9088         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9089
9090         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9091         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9092                       vcfg->strip_tag1_en ? 1 : 0);
9093         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9094                       vcfg->strip_tag2_en ? 1 : 0);
9095         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9096                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9097         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9098                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9099         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9100                       vcfg->strip_tag1_discard_en ? 1 : 0);
9101         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9102                       vcfg->strip_tag2_discard_en ? 1 : 0);
9103
9104         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9105         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9106                         HCLGE_VF_NUM_PER_BYTE;
9107         req->vf_bitmap[bmap_index] =
9108                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9109
9110         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9111         if (status)
9112                 dev_err(&hdev->pdev->dev,
9113                         "Send port rxvlan cfg command fail, ret =%d\n",
9114                         status);
9115
9116         return status;
9117 }
9118
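/* configure the tx and rx vlan tag offload of a vport according to the port
 * based vlan state: when it is disabled, no default tag1 is inserted on tx
 * and tag2 stripping follows rx_vlan_offload_en; when enabled, the given
 * vlan_tag is inserted as tag1 on tx, tag1 stripping follows
 * rx_vlan_offload_en and tag2 is stripped and discarded on rx.
 */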
9119 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9120                                   u16 port_base_vlan_state,
9121                                   u16 vlan_tag)
9122 {
9123         int ret;
9124
9125         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9126                 vport->txvlan_cfg.accept_tag1 = true;
9127                 vport->txvlan_cfg.insert_tag1_en = false;
9128                 vport->txvlan_cfg.default_tag1 = 0;
9129         } else {
9130                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9131
9132                 vport->txvlan_cfg.accept_tag1 =
9133                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9134                 vport->txvlan_cfg.insert_tag1_en = true;
9135                 vport->txvlan_cfg.default_tag1 = vlan_tag;
9136         }
9137
9138         vport->txvlan_cfg.accept_untag1 = true;
9139
9140         /* accept_tag2 and accept_untag2 are not supported on
9141          * pdev revision(0x20); newer revisions support them, but
9142          * these two fields cannot be configured by the user.
9143          */
9144         vport->txvlan_cfg.accept_tag2 = true;
9145         vport->txvlan_cfg.accept_untag2 = true;
9146         vport->txvlan_cfg.insert_tag2_en = false;
9147         vport->txvlan_cfg.default_tag2 = 0;
9148         vport->txvlan_cfg.tag_shift_mode_en = true;
9149
9150         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9151                 vport->rxvlan_cfg.strip_tag1_en = false;
9152                 vport->rxvlan_cfg.strip_tag2_en =
9153                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9154                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9155         } else {
9156                 vport->rxvlan_cfg.strip_tag1_en =
9157                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9158                 vport->rxvlan_cfg.strip_tag2_en = true;
9159                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9160         }
9161
9162         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9163         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9164         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9165
9166         ret = hclge_set_vlan_tx_offload_cfg(vport);
9167         if (ret)
9168                 return ret;
9169
9170         return hclge_set_vlan_rx_offload_cfg(vport);
9171 }
9172
9173 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9174 {
9175         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9176         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9177         struct hclge_desc desc;
9178         int status;
9179
9180         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9181         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9182         rx_req->ot_fst_vlan_type =
9183                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9184         rx_req->ot_sec_vlan_type =
9185                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9186         rx_req->in_fst_vlan_type =
9187                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9188         rx_req->in_sec_vlan_type =
9189                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9190
9191         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9192         if (status) {
9193                 dev_err(&hdev->pdev->dev,
9194                         "Send rxvlan protocol type command fail, ret =%d\n",
9195                         status);
9196                 return status;
9197         }
9198
9199         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9200
9201         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9202         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9203         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9204
9205         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9206         if (status)
9207                 dev_err(&hdev->pdev->dev,
9208                         "Send txvlan protocol type command fail, ret =%d\n",
9209                         status);
9210
9211         return status;
9212 }
9213
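/* initial vlan configuration of the pf: enable the vf and port vlan filters
 * (per function on DEVICE_VERSION_V2 and later), set the default vlan
 * protocol types, apply the vlan tag offload settings of every vport and
 * finally add vlan 0 to the filter.
 */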
9214 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9215 {
9216 #define HCLGE_DEF_VLAN_TYPE             0x8100
9217
9218         struct hnae3_handle *handle = &hdev->vport[0].nic;
9219         struct hclge_vport *vport;
9220         int ret;
9221         int i;
9222
9223         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9224                 /* for revision 0x21, vf vlan filter is per function */
9225                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9226                         vport = &hdev->vport[i];
9227                         ret = hclge_set_vlan_filter_ctrl(hdev,
9228                                                          HCLGE_FILTER_TYPE_VF,
9229                                                          HCLGE_FILTER_FE_EGRESS,
9230                                                          true,
9231                                                          vport->vport_id);
9232                         if (ret)
9233                                 return ret;
9234                 }
9235
9236                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9237                                                  HCLGE_FILTER_FE_INGRESS, true,
9238                                                  0);
9239                 if (ret)
9240                         return ret;
9241         } else {
9242                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9243                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9244                                                  true, 0);
9245                 if (ret)
9246                         return ret;
9247         }
9248
9249         handle->netdev_flags |= HNAE3_VLAN_FLTR;
9250
9251         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9252         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9253         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9254         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9255         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9256         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9257
9258         ret = hclge_set_vlan_protocol_type(hdev);
9259         if (ret)
9260                 return ret;
9261
9262         for (i = 0; i < hdev->num_alloc_vport; i++) {
9263                 u16 vlan_tag;
9264
9265                 vport = &hdev->vport[i];
9266                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9267
9268                 ret = hclge_vlan_offload_cfg(vport,
9269                                              vport->port_base_vlan_cfg.state,
9270                                              vlan_tag);
9271                 if (ret)
9272                         return ret;
9273         }
9274
9275         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9276 }
9277
9278 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9279                                        bool writen_to_tbl)
9280 {
9281         struct hclge_vport_vlan_cfg *vlan;
9282
9283         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9284         if (!vlan)
9285                 return;
9286
9287         vlan->hd_tbl_status = writen_to_tbl;
9288         vlan->vlan_id = vlan_id;
9289
9290         list_add_tail(&vlan->node, &vport->vlan_list);
9291 }
9292
9293 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9294 {
9295         struct hclge_vport_vlan_cfg *vlan, *tmp;
9296         struct hclge_dev *hdev = vport->back;
9297         int ret;
9298
9299         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9300                 if (!vlan->hd_tbl_status) {
9301                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9302                                                        vport->vport_id,
9303                                                        vlan->vlan_id, false);
9304                         if (ret) {
9305                                 dev_err(&hdev->pdev->dev,
9306                                         "restore vport vlan list failed, ret=%d\n",
9307                                         ret);
9308                                 return ret;
9309                         }
9310                 }
9311                 vlan->hd_tbl_status = true;
9312         }
9313
9314         return 0;
9315 }
9316
9317 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9318                                       bool is_write_tbl)
9319 {
9320         struct hclge_vport_vlan_cfg *vlan, *tmp;
9321         struct hclge_dev *hdev = vport->back;
9322
9323         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9324                 if (vlan->vlan_id == vlan_id) {
9325                         if (is_write_tbl && vlan->hd_tbl_status)
9326                                 hclge_set_vlan_filter_hw(hdev,
9327                                                          htons(ETH_P_8021Q),
9328                                                          vport->vport_id,
9329                                                          vlan_id,
9330                                                          true);
9331
9332                         list_del(&vlan->node);
9333                         kfree(vlan);
9334                         break;
9335                 }
9336         }
9337 }
9338
9339 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9340 {
9341         struct hclge_vport_vlan_cfg *vlan, *tmp;
9342         struct hclge_dev *hdev = vport->back;
9343
9344         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9345                 if (vlan->hd_tbl_status)
9346                         hclge_set_vlan_filter_hw(hdev,
9347                                                  htons(ETH_P_8021Q),
9348                                                  vport->vport_id,
9349                                                  vlan->vlan_id,
9350                                                  true);
9351
9352                 vlan->hd_tbl_status = false;
9353                 if (is_del_list) {
9354                         list_del(&vlan->node);
9355                         kfree(vlan);
9356                 }
9357         }
9358         clear_bit(vport->vport_id, hdev->vf_vlan_full);
9359 }
9360
9361 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9362 {
9363         struct hclge_vport_vlan_cfg *vlan, *tmp;
9364         struct hclge_vport *vport;
9365         int i;
9366
9367         for (i = 0; i < hdev->num_alloc_vport; i++) {
9368                 vport = &hdev->vport[i];
9369                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9370                         list_del(&vlan->node);
9371                         kfree(vlan);
9372                 }
9373         }
9374 }
9375
9376 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9377 {
9378         struct hclge_vport_vlan_cfg *vlan, *tmp;
9379         struct hclge_dev *hdev = vport->back;
9380         u16 vlan_proto;
9381         u16 vlan_id;
9382         u16 state;
9383         int ret;
9384
9385         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9386         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9387         state = vport->port_base_vlan_cfg.state;
9388
9389         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9390                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9391                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9392                                          vport->vport_id, vlan_id,
9393                                          false);
9394                 return;
9395         }
9396
9397         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9398                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9399                                                vport->vport_id,
9400                                                vlan->vlan_id, false);
9401                 if (ret)
9402                         break;
9403                 vlan->hd_tbl_status = true;
9404         }
9405 }
9406
9407 /* For global reset and imp reset, hardware clears the mac table, so we
9408  * change the mac address state from ACTIVE to TO_ADD, then they can be
9409  * restored in the service task after the reset completes. Furthermore,
9410  * mac addresses in the TO_DEL or DEL_FAIL state do not need to be
9411  * restored after reset, so just remove these mac nodes from the mac_list.
9412  */
9413 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9414 {
9415         struct hclge_mac_node *mac_node, *tmp;
9416
9417         list_for_each_entry_safe(mac_node, tmp, list, node) {
9418                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9419                         mac_node->state = HCLGE_MAC_TO_ADD;
9420                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9421                         list_del(&mac_node->node);
9422                         kfree(mac_node);
9423                 }
9424         }
9425 }
9426
9427 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9428 {
9429         spin_lock_bh(&vport->mac_list_lock);
9430
9431         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9432         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9433         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9434
9435         spin_unlock_bh(&vport->mac_list_lock);
9436 }
9437
9438 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9439 {
9440         struct hclge_vport *vport = &hdev->vport[0];
9441         struct hnae3_handle *handle = &vport->nic;
9442
9443         hclge_restore_mac_table_common(vport);
9444         hclge_restore_vport_vlan_table(vport);
9445         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9446
9447         hclge_restore_fd_entries(handle);
9448 }
9449
9450 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9451 {
9452         struct hclge_vport *vport = hclge_get_vport(handle);
9453
9454         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9455                 vport->rxvlan_cfg.strip_tag1_en = false;
9456                 vport->rxvlan_cfg.strip_tag2_en = enable;
9457                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9458         } else {
9459                 vport->rxvlan_cfg.strip_tag1_en = enable;
9460                 vport->rxvlan_cfg.strip_tag2_en = true;
9461                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9462         }
9463
9464         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9465         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9466         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9467         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9468
9469         return hclge_set_vlan_rx_offload_cfg(vport);
9470 }
9471
9472 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9473                                             u16 port_base_vlan_state,
9474                                             struct hclge_vlan_info *new_info,
9475                                             struct hclge_vlan_info *old_info)
9476 {
9477         struct hclge_dev *hdev = vport->back;
9478         int ret;
9479
9480         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9481                 hclge_rm_vport_all_vlan_table(vport, false);
9482                 return hclge_set_vlan_filter_hw(hdev,
9483                                                  htons(new_info->vlan_proto),
9484                                                  vport->vport_id,
9485                                                  new_info->vlan_tag,
9486                                                  false);
9487         }
9488
9489         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9490                                        vport->vport_id, old_info->vlan_tag,
9491                                        true);
9492         if (ret)
9493                 return ret;
9494
9495         return hclge_add_vport_all_vlan_table(vport);
9496 }
9497
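/* apply a new port based vlan configuration to a vport: reconfigure the vlan
 * tag offload, update the hardware vlan filter (replacing the old tag for
 * MODIFY, or switching between the port based vlan and the vport vlan list
 * for ENABLE/DISABLE), and record the new state and vlan info.
 */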
9498 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9499                                     struct hclge_vlan_info *vlan_info)
9500 {
9501         struct hnae3_handle *nic = &vport->nic;
9502         struct hclge_vlan_info *old_vlan_info;
9503         struct hclge_dev *hdev = vport->back;
9504         int ret;
9505
9506         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9507
9508         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9509         if (ret)
9510                 return ret;
9511
9512         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9513                 /* add new VLAN tag */
9514                 ret = hclge_set_vlan_filter_hw(hdev,
9515                                                htons(vlan_info->vlan_proto),
9516                                                vport->vport_id,
9517                                                vlan_info->vlan_tag,
9518                                                false);
9519                 if (ret)
9520                         return ret;
9521
9522                 /* remove old VLAN tag */
9523                 ret = hclge_set_vlan_filter_hw(hdev,
9524                                                htons(old_vlan_info->vlan_proto),
9525                                                vport->vport_id,
9526                                                old_vlan_info->vlan_tag,
9527                                                true);
9528                 if (ret)
9529                         return ret;
9530
9531                 goto update;
9532         }
9533
9534         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9535                                                old_vlan_info);
9536         if (ret)
9537                 return ret;
9538
9539         /* update the state only when disabling/enabling port based VLAN */
9540         vport->port_base_vlan_cfg.state = state;
9541         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9542                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9543         else
9544                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9545
9546 update:
9547         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9548         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9549         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9550
9551         return 0;
9552 }
9553
9554 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9555                                           enum hnae3_port_base_vlan_state state,
9556                                           u16 vlan)
9557 {
9558         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9559                 if (!vlan)
9560                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9561                 else
9562                         return HNAE3_PORT_BASE_VLAN_ENABLE;
9563         } else {
9564                 if (!vlan)
9565                         return HNAE3_PORT_BASE_VLAN_DISABLE;
9566                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9567                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9568                 else
9569                         return HNAE3_PORT_BASE_VLAN_MODIFY;
9570         }
9571 }
9572
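/* set the port based vlan of a vf: validate the vlan id and qos, work out
 * whether the port based vlan must be enabled, disabled or modified, update
 * the configuration and, before DEVICE_VERSION_V3, notify an alive vf of the
 * new port based vlan state.
 */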
9573 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9574                                     u16 vlan, u8 qos, __be16 proto)
9575 {
9576         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9577         struct hclge_vport *vport = hclge_get_vport(handle);
9578         struct hclge_dev *hdev = vport->back;
9579         struct hclge_vlan_info vlan_info;
9580         u16 state;
9581         int ret;
9582
9583         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9584                 return -EOPNOTSUPP;
9585
9586         vport = hclge_get_vf_vport(hdev, vfid);
9587         if (!vport)
9588                 return -EINVAL;
9589
9590         /* qos is a 3-bit value, so it cannot be bigger than 7 */
9591         if (vlan > VLAN_N_VID - 1 || qos > 7)
9592                 return -EINVAL;
9593         if (proto != htons(ETH_P_8021Q))
9594                 return -EPROTONOSUPPORT;
9595
9596         state = hclge_get_port_base_vlan_state(vport,
9597                                                vport->port_base_vlan_cfg.state,
9598                                                vlan);
9599         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9600                 return 0;
9601
9602         vlan_info.vlan_tag = vlan;
9603         vlan_info.qos = qos;
9604         vlan_info.vlan_proto = ntohs(proto);
9605
9606         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9607         if (ret) {
9608                 dev_err(&hdev->pdev->dev,
9609                         "failed to update port base vlan for vf %d, ret = %d\n",
9610                         vfid, ret);
9611                 return ret;
9612         }
9613
9614         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9615          * VLAN state.
9616          */
9617         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9618             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9619                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9620                                                   vport->vport_id, state,
9621                                                   vlan, qos,
9622                                                   ntohs(proto));
9623
9624         return 0;
9625 }
9626
9627 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9628 {
9629         struct hclge_vlan_info *vlan_info;
9630         struct hclge_vport *vport;
9631         int ret;
9632         int vf;
9633
9634         /* clear the port based vlan of every vf */
9635         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9636                 vport = &hdev->vport[vf];
9637                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9638
9639                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9640                                                vport->vport_id,
9641                                                vlan_info->vlan_tag, true);
9642                 if (ret)
9643                         dev_err(&hdev->pdev->dev,
9644                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9645                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9646         }
9647 }
9648
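/* add or remove a vlan id for a vport: while the device is resetting or the
 * reset has failed, the removal is only recorded in vlan_del_fail_bmap;
 * otherwise the hardware filter is updated (unless a port based vlan is
 * active) and the vport vlan list is kept in sync, with failed removals
 * recorded for a later retry.
 */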
9649 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9650                           u16 vlan_id, bool is_kill)
9651 {
9652         struct hclge_vport *vport = hclge_get_vport(handle);
9653         struct hclge_dev *hdev = vport->back;
9654         bool writen_to_tbl = false;
9655         int ret = 0;
9656
9657         /* When the device is resetting or the reset has failed, firmware
9658          * is unable to handle the mailbox. Just record the vlan id, and
9659          * remove it after the reset finishes.
9660          */
9661         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9662              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9663                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9664                 return -EBUSY;
9665         }
9666
9667         /* when port based vlan is enabled, we use the port based vlan as the
9668          * vlan filter entry. In this case, we don't update the vlan filter
9669          * table when the user adds a new vlan or removes an existing one; we
9670          * just update the vport vlan list. The vlan ids in the vlan list are
9671          * not written to the vlan filter table until port based vlan is disabled.
9672          */
9673         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9674                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9675                                                vlan_id, is_kill);
9676                 writen_to_tbl = true;
9677         }
9678
9679         if (!ret) {
9680                 if (is_kill)
9681                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9682                 else
9683                         hclge_add_vport_vlan_table(vport, vlan_id,
9684                                                    writen_to_tbl);
9685         } else if (is_kill) {
9686                 /* when removing the hw vlan filter failed, record the vlan
9687                  * id, and try to remove it from hw later, to be consistent
9688                  * with the stack
9689                  */
9690                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9691         }
9692         return ret;
9693 }
9694
9695 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9696 {
9697 #define HCLGE_MAX_SYNC_COUNT    60
9698
9699         int i, ret, sync_cnt = 0;
9700         u16 vlan_id;
9701
9702         /* sync the failed vlan deletions of every vport, PF included */
9703         for (i = 0; i < hdev->num_alloc_vport; i++) {
9704                 struct hclge_vport *vport = &hdev->vport[i];
9705
9706                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9707                                          VLAN_N_VID);
9708                 while (vlan_id != VLAN_N_VID) {
9709                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9710                                                        vport->vport_id, vlan_id,
9711                                                        true);
9712                         if (ret && ret != -EINVAL)
9713                                 return;
9714
9715                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9716                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9717
9718                         sync_cnt++;
9719                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9720                                 return;
9721
9722                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9723                                                  VLAN_N_VID);
9724                 }
9725         }
9726 }
9727
9728 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9729 {
9730         struct hclge_config_max_frm_size_cmd *req;
9731         struct hclge_desc desc;
9732
9733         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9734
9735         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9736         req->max_frm_size = cpu_to_le16(new_mps);
9737         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9738
9739         return hclge_cmd_send(&hdev->hw, &desc, 1);
9740 }
9741
9742 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9743 {
9744         struct hclge_vport *vport = hclge_get_vport(handle);
9745
9746         return hclge_set_vport_mtu(vport, new_mtu);
9747 }
9748
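/* Convert the requested MTU to a max frame size and apply it. For example,
 * new_mtu = 1500 gives 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * 2 * VLAN_HLEN (8) = 1526 bytes. A VF only records its mps (it must not
 * exceed the PF's), while the PF reprograms the MAC and re-allocates the
 * packet buffers with the client paused.
 */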
9749 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9750 {
9751         struct hclge_dev *hdev = vport->back;
9752         int i, max_frm_size, ret;
9753
9754         /* HW supports 2-layer VLAN */
9755         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9756         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9757             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9758                 return -EINVAL;
9759
9760         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9761         mutex_lock(&hdev->vport_lock);
9762         /* VF's mps must fit within hdev->mps */
9763         if (vport->vport_id && max_frm_size > hdev->mps) {
9764                 mutex_unlock(&hdev->vport_lock);
9765                 return -EINVAL;
9766         } else if (vport->vport_id) {
9767                 vport->mps = max_frm_size;
9768                 mutex_unlock(&hdev->vport_lock);
9769                 return 0;
9770         }
9771
9772         /* PF's mps must not be less than the mps of any VF */
9773         for (i = 1; i < hdev->num_alloc_vport; i++)
9774                 if (max_frm_size < hdev->vport[i].mps) {
9775                         mutex_unlock(&hdev->vport_lock);
9776                         return -EINVAL;
9777                 }
9778
9779         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9780
9781         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9782         if (ret) {
9783                 dev_err(&hdev->pdev->dev,
9784                         "Change mtu fail, ret =%d\n", ret);
9785                 goto out;
9786         }
9787
9788         hdev->mps = max_frm_size;
9789         vport->mps = max_frm_size;
9790
9791         ret = hclge_buffer_alloc(hdev);
9792         if (ret)
9793                 dev_err(&hdev->pdev->dev,
9794                         "Allocate buffer fail, ret =%d\n", ret);
9795
9796 out:
9797         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9798         mutex_unlock(&hdev->vport_lock);
9799         return ret;
9800 }
9801
9802 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9803                                     bool enable)
9804 {
9805         struct hclge_reset_tqp_queue_cmd *req;
9806         struct hclge_desc desc;
9807         int ret;
9808
9809         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9810
9811         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9812         req->tqp_id = cpu_to_le16(queue_id);
9813         if (enable)
9814                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9815
9816         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9817         if (ret) {
9818                 dev_err(&hdev->pdev->dev,
9819                         "Send tqp reset cmd error, status =%d\n", ret);
9820                 return ret;
9821         }
9822
9823         return 0;
9824 }
9825
9826 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9827 {
9828         struct hclge_reset_tqp_queue_cmd *req;
9829         struct hclge_desc desc;
9830         int ret;
9831
9832         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9833
9834         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9835         req->tqp_id = cpu_to_le16(queue_id);
9836
9837         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9838         if (ret) {
9839                 dev_err(&hdev->pdev->dev,
9840                         "Get reset status error, status =%d\n", ret);
9841                 return ret;
9842         }
9843
9844         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9845 }
9846
9847 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9848 {
9849         struct hnae3_queue *queue;
9850         struct hclge_tqp *tqp;
9851
9852         queue = handle->kinfo.tqp[queue_id];
9853         tqp = container_of(queue, struct hclge_tqp, q);
9854
9855         return tqp->index;
9856 }
9857
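/* Reset one TQP: translate the handle-local queue id to the global id,
 * disable the queue, request the reset, poll the ready status up to
 * HCLGE_TQP_RESET_TRY_TIMES, then deassert the reset again.
 */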
9858 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9859 {
9860         struct hclge_vport *vport = hclge_get_vport(handle);
9861         struct hclge_dev *hdev = vport->back;
9862         int reset_try_times = 0;
9863         int reset_status;
9864         u16 queue_gid;
9865         int ret;
9866
9867         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9868
9869         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9870         if (ret) {
9871                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9872                 return ret;
9873         }
9874
9875         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9876         if (ret) {
9877                 dev_err(&hdev->pdev->dev,
9878                         "Send reset tqp cmd fail, ret = %d\n", ret);
9879                 return ret;
9880         }
9881
9882         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9883                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9884                 if (reset_status)
9885                         break;
9886
9887                 /* Wait for tqp hw reset */
9888                 usleep_range(1000, 1200);
9889         }
9890
9891         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9892                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9893                 return ret;
9894         }
9895
9896         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9897         if (ret)
9898                 dev_err(&hdev->pdev->dev,
9899                         "Deassert the soft reset fail, ret = %d\n", ret);
9900
9901         return ret;
9902 }
9903
9904 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9905 {
9906         struct hnae3_handle *handle = &vport->nic;
9907         struct hclge_dev *hdev = vport->back;
9908         int reset_try_times = 0;
9909         int reset_status;
9910         u16 queue_gid;
9911         int ret;
9912
9913         if (queue_id >= handle->kinfo.num_tqps) {
9914                 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9915                          queue_id);
9916                 return;
9917         }
9918
9919         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9920
9921         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9922         if (ret) {
9923                 dev_warn(&hdev->pdev->dev,
9924                          "Send reset tqp cmd fail, ret = %d\n", ret);
9925                 return;
9926         }
9927
9928         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9929                 reset_status = hclge_get_reset_status(hdev, queue_gid);
9930                 if (reset_status)
9931                         break;
9932
9933                 /* Wait for tqp hw reset */
9934                 usleep_range(1000, 1200);
9935         }
9936
9937         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9938                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9939                 return;
9940         }
9941
9942         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9943         if (ret)
9944                 dev_warn(&hdev->pdev->dev,
9945                          "Deassert the soft reset fail, ret = %d\n", ret);
9946 }
9947
9948 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9949 {
9950         struct hclge_vport *vport = hclge_get_vport(handle);
9951         struct hclge_dev *hdev = vport->back;
9952
9953         return hdev->fw_version;
9954 }
9955
9956 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9957 {
9958         struct phy_device *phydev = hdev->hw.mac.phydev;
9959
9960         if (!phydev)
9961                 return;
9962
9963         phy_set_asym_pause(phydev, rx_en, tx_en);
9964 }
9965
9966 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9967 {
9968         int ret;
9969
9970         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9971                 return 0;
9972
9973         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9974         if (ret)
9975                 dev_err(&hdev->pdev->dev,
9976                         "configure pauseparam error, ret = %d.\n", ret);
9977
9978         return ret;
9979 }
9980
9981 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9982 {
9983         struct phy_device *phydev = hdev->hw.mac.phydev;
9984         u16 remote_advertising = 0;
9985         u16 local_advertising;
9986         u32 rx_pause, tx_pause;
9987         u8 flowctl;
9988
9989         if (!phydev->link || !phydev->autoneg)
9990                 return 0;
9991
9992         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9993
9994         if (phydev->pause)
9995                 remote_advertising = LPA_PAUSE_CAP;
9996
9997         if (phydev->asym_pause)
9998                 remote_advertising |= LPA_PAUSE_ASYM;
9999
10000         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10001                                            remote_advertising);
10002         tx_pause = flowctl & FLOW_CTRL_TX;
10003         rx_pause = flowctl & FLOW_CTRL_RX;
10004
10005         if (phydev->duplex == HCLGE_MAC_HALF) {
10006                 tx_pause = 0;
10007                 rx_pause = 0;
10008         }
10009
10010         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10011 }
10012
10013 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10014                                  u32 *rx_en, u32 *tx_en)
10015 {
10016         struct hclge_vport *vport = hclge_get_vport(handle);
10017         struct hclge_dev *hdev = vport->back;
10018         struct phy_device *phydev = hdev->hw.mac.phydev;
10019
10020         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
10021
10022         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10023                 *rx_en = 0;
10024                 *tx_en = 0;
10025                 return;
10026         }
10027
10028         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10029                 *rx_en = 1;
10030                 *tx_en = 0;
10031         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10032                 *tx_en = 1;
10033                 *rx_en = 0;
10034         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10035                 *rx_en = 1;
10036                 *tx_en = 1;
10037         } else {
10038                 *rx_en = 0;
10039                 *tx_en = 0;
10040         }
10041 }
10042
10043 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10044                                          u32 rx_en, u32 tx_en)
10045 {
10046         if (rx_en && tx_en)
10047                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10048         else if (rx_en && !tx_en)
10049                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10050         else if (!rx_en && tx_en)
10051                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10052         else
10053                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10054
10055         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10056 }
10057
10058 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10059                                 u32 rx_en, u32 tx_en)
10060 {
10061         struct hclge_vport *vport = hclge_get_vport(handle);
10062         struct hclge_dev *hdev = vport->back;
10063         struct phy_device *phydev = hdev->hw.mac.phydev;
10064         u32 fc_autoneg;
10065
10066         if (phydev) {
10067                 fc_autoneg = hclge_get_autoneg(handle);
10068                 if (auto_neg != fc_autoneg) {
10069                         dev_info(&hdev->pdev->dev,
10070                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10071                         return -EOPNOTSUPP;
10072                 }
10073         }
10074
10075         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10076                 dev_info(&hdev->pdev->dev,
10077                          "Priority flow control enabled. Cannot set link flow control.\n");
10078                 return -EOPNOTSUPP;
10079         }
10080
10081         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10082
10083         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10084
10085         if (!auto_neg)
10086                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10087
10088         if (phydev)
10089                 return phy_start_aneg(phydev);
10090
10091         return -EOPNOTSUPP;
10092 }
10093
10094 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10095                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10096 {
10097         struct hclge_vport *vport = hclge_get_vport(handle);
10098         struct hclge_dev *hdev = vport->back;
10099
10100         if (speed)
10101                 *speed = hdev->hw.mac.speed;
10102         if (duplex)
10103                 *duplex = hdev->hw.mac.duplex;
10104         if (auto_neg)
10105                 *auto_neg = hdev->hw.mac.autoneg;
10106 }
10107
10108 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10109                                  u8 *module_type)
10110 {
10111         struct hclge_vport *vport = hclge_get_vport(handle);
10112         struct hclge_dev *hdev = vport->back;
10113
10114         /* When the nic is down, the service task is not running, so it
10115          * doesn't update the port information every second. Query the port
10116          * information before returning the media type to ensure it is correct.
10117          */
10118         hclge_update_port_info(hdev);
10119
10120         if (media_type)
10121                 *media_type = hdev->hw.mac.media_type;
10122
10123         if (module_type)
10124                 *module_type = hdev->hw.mac.module_type;
10125 }
10126
10127 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10128                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10129 {
10130         struct hclge_vport *vport = hclge_get_vport(handle);
10131         struct hclge_dev *hdev = vport->back;
10132         struct phy_device *phydev = hdev->hw.mac.phydev;
10133         int mdix_ctrl, mdix, is_resolved;
10134         unsigned int retval;
10135
10136         if (!phydev) {
10137                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10138                 *tp_mdix = ETH_TP_MDI_INVALID;
10139                 return;
10140         }
10141
10142         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10143
10144         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10145         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10146                                     HCLGE_PHY_MDIX_CTRL_S);
10147
10148         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10149         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10150         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10151
10152         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10153
10154         switch (mdix_ctrl) {
10155         case 0x0:
10156                 *tp_mdix_ctrl = ETH_TP_MDI;
10157                 break;
10158         case 0x1:
10159                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10160                 break;
10161         case 0x3:
10162                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10163                 break;
10164         default:
10165                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10166                 break;
10167         }
10168
10169         if (!is_resolved)
10170                 *tp_mdix = ETH_TP_MDI_INVALID;
10171         else if (mdix)
10172                 *tp_mdix = ETH_TP_MDI_X;
10173         else
10174                 *tp_mdix = ETH_TP_MDI;
10175 }
10176
10177 static void hclge_info_show(struct hclge_dev *hdev)
10178 {
10179         struct device *dev = &hdev->pdev->dev;
10180
10181         dev_info(dev, "PF info begin:\n");
10182
10183         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10184         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10185         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10186         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10187         dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
10188         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10189         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10190         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10191         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10192         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10193         dev_info(dev, "This is %s PF\n",
10194                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10195         dev_info(dev, "DCB %s\n",
10196                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10197         dev_info(dev, "MQPRIO %s\n",
10198                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10199
10200         dev_info(dev, "PF info end.\n");
10201 }
10202
10203 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10204                                           struct hclge_vport *vport)
10205 {
10206         struct hnae3_client *client = vport->nic.client;
10207         struct hclge_dev *hdev = ae_dev->priv;
10208         int rst_cnt = hdev->rst_stats.reset_cnt;
10209         int ret;
10210
10211         ret = client->ops->init_instance(&vport->nic);
10212         if (ret)
10213                 return ret;
10214
10215         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10216         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10217             rst_cnt != hdev->rst_stats.reset_cnt) {
10218                 ret = -EBUSY;
10219                 goto init_nic_err;
10220         }
10221
10222         /* Enable nic hw error interrupts */
10223         ret = hclge_config_nic_hw_error(hdev, true);
10224         if (ret) {
10225                 dev_err(&ae_dev->pdev->dev,
10226                         "fail(%d) to enable hw error interrupts\n", ret);
10227                 goto init_nic_err;
10228         }
10229
10230         hnae3_set_client_init_flag(client, ae_dev, 1);
10231
10232         if (netif_msg_drv(&hdev->vport->nic))
10233                 hclge_info_show(hdev);
10234
10235         return ret;
10236
10237 init_nic_err:
10238         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10239         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10240                 msleep(HCLGE_WAIT_RESET_DONE);
10241
10242         client->ops->uninit_instance(&vport->nic, 0);
10243
10244         return ret;
10245 }
10246
10247 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10248                                            struct hclge_vport *vport)
10249 {
10250         struct hclge_dev *hdev = ae_dev->priv;
10251         struct hnae3_client *client;
10252         int rst_cnt;
10253         int ret;
10254
10255         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10256             !hdev->nic_client)
10257                 return 0;
10258
10259         client = hdev->roce_client;
10260         ret = hclge_init_roce_base_info(vport);
10261         if (ret)
10262                 return ret;
10263
10264         rst_cnt = hdev->rst_stats.reset_cnt;
10265         ret = client->ops->init_instance(&vport->roce);
10266         if (ret)
10267                 return ret;
10268
10269         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10270         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10271             rst_cnt != hdev->rst_stats.reset_cnt) {
10272                 ret = -EBUSY;
10273                 goto init_roce_err;
10274         }
10275
10276         /* Enable roce ras interrupts */
10277         ret = hclge_config_rocee_ras_interrupt(hdev, true);
10278         if (ret) {
10279                 dev_err(&ae_dev->pdev->dev,
10280                         "fail(%d) to enable roce ras interrupts\n", ret);
10281                 goto init_roce_err;
10282         }
10283
10284         hnae3_set_client_init_flag(client, ae_dev, 1);
10285
10286         return 0;
10287
10288 init_roce_err:
10289         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10290         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10291                 msleep(HCLGE_WAIT_RESET_DONE);
10292
10293         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10294
10295         return ret;
10296 }
10297
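/* Register a nic or roce client instance on every vport. A KNIC client
 * also initializes the roce client when roce is supported; failures are
 * unwound through the clear_nic/clear_roce labels.
 */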
10298 static int hclge_init_client_instance(struct hnae3_client *client,
10299                                       struct hnae3_ae_dev *ae_dev)
10300 {
10301         struct hclge_dev *hdev = ae_dev->priv;
10302         struct hclge_vport *vport;
10303         int i, ret;
10304
10305         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10306                 vport = &hdev->vport[i];
10307
10308                 switch (client->type) {
10309                 case HNAE3_CLIENT_KNIC:
10310                         hdev->nic_client = client;
10311                         vport->nic.client = client;
10312                         ret = hclge_init_nic_client_instance(ae_dev, vport);
10313                         if (ret)
10314                                 goto clear_nic;
10315
10316                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10317                         if (ret)
10318                                 goto clear_roce;
10319
10320                         break;
10321                 case HNAE3_CLIENT_ROCE:
10322                         if (hnae3_dev_roce_supported(hdev)) {
10323                                 hdev->roce_client = client;
10324                                 vport->roce.client = client;
10325                         }
10326
10327                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10328                         if (ret)
10329                                 goto clear_roce;
10330
10331                         break;
10332                 default:
10333                         return -EINVAL;
10334                 }
10335         }
10336
10337         return 0;
10338
10339 clear_nic:
10340         hdev->nic_client = NULL;
10341         vport->nic.client = NULL;
10342         return ret;
10343 clear_roce:
10344         hdev->roce_client = NULL;
10345         vport->roce.client = NULL;
10346         return ret;
10347 }
10348
10349 static void hclge_uninit_client_instance(struct hnae3_client *client,
10350                                          struct hnae3_ae_dev *ae_dev)
10351 {
10352         struct hclge_dev *hdev = ae_dev->priv;
10353         struct hclge_vport *vport;
10354         int i;
10355
10356         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10357                 vport = &hdev->vport[i];
10358                 if (hdev->roce_client) {
10359                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10360                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10361                                 msleep(HCLGE_WAIT_RESET_DONE);
10362
10363                         hdev->roce_client->ops->uninit_instance(&vport->roce,
10364                                                                 0);
10365                         hdev->roce_client = NULL;
10366                         vport->roce.client = NULL;
10367                 }
10368                 if (client->type == HNAE3_CLIENT_ROCE)
10369                         return;
10370                 if (hdev->nic_client && client->ops->uninit_instance) {
10371                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10372                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10373                                 msleep(HCLGE_WAIT_RESET_DONE);
10374
10375                         client->ops->uninit_instance(&vport->nic, 0);
10376                         hdev->nic_client = NULL;
10377                         vport->nic.client = NULL;
10378                 }
10379         }
10380 }
10381
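/* Map the optional device memory BAR (HCLGE_MEM_BAR) write-combined; if
 * the device does not expose this BAR, return success directly.
 */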
10382 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10383 {
10384 #define HCLGE_MEM_BAR           4
10385
10386         struct pci_dev *pdev = hdev->pdev;
10387         struct hclge_hw *hw = &hdev->hw;
10388
10389         /* for devices that do not have device memory, return directly */
10390         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10391                 return 0;
10392
10393         hw->mem_base = devm_ioremap_wc(&pdev->dev,
10394                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
10395                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
10396         if (!hw->mem_base) {
10397                 dev_err(&pdev->dev, "failed to map device memory\n");
10398                 return -EFAULT;
10399         }
10400
10401         return 0;
10402 }
10403
10404 static int hclge_pci_init(struct hclge_dev *hdev)
10405 {
10406         struct pci_dev *pdev = hdev->pdev;
10407         struct hclge_hw *hw;
10408         int ret;
10409
10410         ret = pci_enable_device(pdev);
10411         if (ret) {
10412                 dev_err(&pdev->dev, "failed to enable PCI device\n");
10413                 return ret;
10414         }
10415
10416         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10417         if (ret) {
10418                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10419                 if (ret) {
10420                         dev_err(&pdev->dev,
10421                                 "can't set consistent PCI DMA");
10422                         goto err_disable_device;
10423                 }
10424                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10425         }
10426
10427         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10428         if (ret) {
10429                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10430                 goto err_disable_device;
10431         }
10432
10433         pci_set_master(pdev);
10434         hw = &hdev->hw;
10435         hw->io_base = pcim_iomap(pdev, 2, 0);
10436         if (!hw->io_base) {
10437                 dev_err(&pdev->dev, "Can't map configuration register space\n");
10438                 ret = -ENOMEM;
10439                 goto err_clr_master;
10440         }
10441
10442         ret = hclge_dev_mem_map(hdev);
10443         if (ret)
10444                 goto err_unmap_io_base;
10445
10446         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10447
10448         return 0;
10449
10450 err_unmap_io_base:
10451         pcim_iounmap(pdev, hdev->hw.io_base);
10452 err_clr_master:
10453         pci_clear_master(pdev);
10454         pci_release_regions(pdev);
10455 err_disable_device:
10456         pci_disable_device(pdev);
10457
10458         return ret;
10459 }
10460
10461 static void hclge_pci_uninit(struct hclge_dev *hdev)
10462 {
10463         struct pci_dev *pdev = hdev->pdev;
10464
10465         if (hdev->hw.mem_base)
10466                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10467
10468         pcim_iounmap(pdev, hdev->hw.io_base);
10469         pci_free_irq_vectors(pdev);
10470         pci_clear_master(pdev);
10471         pci_release_mem_regions(pdev);
10472         pci_disable_device(pdev);
10473 }
10474
10475 static void hclge_state_init(struct hclge_dev *hdev)
10476 {
10477         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10478         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10479         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10480         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10481         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10482         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10483         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10484 }
10485
10486 static void hclge_state_uninit(struct hclge_dev *hdev)
10487 {
10488         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10489         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10490
10491         if (hdev->reset_timer.function)
10492                 del_timer_sync(&hdev->reset_timer);
10493         if (hdev->service_task.work.func)
10494                 cancel_delayed_work_sync(&hdev->service_task);
10495 }
10496
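/* Prepare for an FLR: take the reset semaphore and run the reset prepare
 * sequence, retrying if it fails or a reset is still pending, then mask
 * the misc vector and the command queue until hclge_flr_done() runs.
 */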
10497 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10498 {
10499 #define HCLGE_FLR_RETRY_WAIT_MS 500
10500 #define HCLGE_FLR_RETRY_CNT     5
10501
10502         struct hclge_dev *hdev = ae_dev->priv;
10503         int retry_cnt = 0;
10504         int ret;
10505
10506 retry:
10507         down(&hdev->reset_sem);
10508         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10509         hdev->reset_type = HNAE3_FLR_RESET;
10510         ret = hclge_reset_prepare(hdev);
10511         if (ret || hdev->reset_pending) {
10512                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10513                         ret);
10514                 if (hdev->reset_pending ||
10515                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10516                         dev_err(&hdev->pdev->dev,
10517                                 "reset_pending:0x%lx, retry_cnt:%d\n",
10518                                 hdev->reset_pending, retry_cnt);
10519                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10520                         up(&hdev->reset_sem);
10521                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
10522                         goto retry;
10523                 }
10524         }
10525
10526         /* disable misc vector before FLR done */
10527         hclge_enable_vector(&hdev->misc_vector, false);
10528         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10529         hdev->rst_stats.flr_rst_cnt++;
10530 }
10531
10532 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10533 {
10534         struct hclge_dev *hdev = ae_dev->priv;
10535         int ret;
10536
10537         hclge_enable_vector(&hdev->misc_vector, true);
10538
10539         ret = hclge_reset_rebuild(hdev);
10540         if (ret)
10541                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10542
10543         hdev->reset_type = HNAE3_NONE_RESET;
10544         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10545         up(&hdev->reset_sem);
10546 }
10547
10548 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10549 {
10550         u16 i;
10551
10552         for (i = 0; i < hdev->num_alloc_vport; i++) {
10553                 struct hclge_vport *vport = &hdev->vport[i];
10554                 int ret;
10555
10556                  /* Send cmd to clear VF's FUNC_RST_ING */
10557                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10558                 if (ret)
10559                         dev_warn(&hdev->pdev->dev,
10560                                  "clear vf(%u) rst failed %d!\n",
10561                                  vport->vport_id, ret);
10562         }
10563 }
10564
10565 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10566 {
10567         struct pci_dev *pdev = ae_dev->pdev;
10568         struct hclge_dev *hdev;
10569         int ret;
10570
10571         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10572         if (!hdev)
10573                 return -ENOMEM;
10574
10575         hdev->pdev = pdev;
10576         hdev->ae_dev = ae_dev;
10577         hdev->reset_type = HNAE3_NONE_RESET;
10578         hdev->reset_level = HNAE3_FUNC_RESET;
10579         ae_dev->priv = hdev;
10580
10581         /* HW supports 2-layer VLAN */
10582         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10583
10584         mutex_init(&hdev->vport_lock);
10585         spin_lock_init(&hdev->fd_rule_lock);
10586         sema_init(&hdev->reset_sem, 1);
10587
10588         ret = hclge_pci_init(hdev);
10589         if (ret)
10590                 goto out;
10591
10592         /* Firmware command queue initialize */
10593         ret = hclge_cmd_queue_init(hdev);
10594         if (ret)
10595                 goto err_pci_uninit;
10596
10597         /* Firmware command initialize */
10598         ret = hclge_cmd_init(hdev);
10599         if (ret)
10600                 goto err_cmd_uninit;
10601
10602         ret = hclge_get_cap(hdev);
10603         if (ret)
10604                 goto err_cmd_uninit;
10605
10606         ret = hclge_query_dev_specs(hdev);
10607         if (ret) {
10608                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10609                         ret);
10610                 goto err_cmd_uninit;
10611         }
10612
10613         ret = hclge_configure(hdev);
10614         if (ret) {
10615                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10616                 goto err_cmd_uninit;
10617         }
10618
10619         ret = hclge_init_msi(hdev);
10620         if (ret) {
10621                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10622                 goto err_cmd_uninit;
10623         }
10624
10625         ret = hclge_misc_irq_init(hdev);
10626         if (ret)
10627                 goto err_msi_uninit;
10628
10629         ret = hclge_alloc_tqps(hdev);
10630         if (ret) {
10631                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10632                 goto err_msi_irq_uninit;
10633         }
10634
10635         ret = hclge_alloc_vport(hdev);
10636         if (ret)
10637                 goto err_msi_irq_uninit;
10638
10639         ret = hclge_map_tqp(hdev);
10640         if (ret)
10641                 goto err_msi_irq_uninit;
10642
10643         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10644                 ret = hclge_mac_mdio_config(hdev);
10645                 if (ret)
10646                         goto err_msi_irq_uninit;
10647         }
10648
10649         ret = hclge_init_umv_space(hdev);
10650         if (ret)
10651                 goto err_mdiobus_unreg;
10652
10653         ret = hclge_mac_init(hdev);
10654         if (ret) {
10655                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10656                 goto err_mdiobus_unreg;
10657         }
10658
10659         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10660         if (ret) {
10661                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10662                 goto err_mdiobus_unreg;
10663         }
10664
10665         ret = hclge_config_gro(hdev, true);
10666         if (ret)
10667                 goto err_mdiobus_unreg;
10668
10669         ret = hclge_init_vlan_config(hdev);
10670         if (ret) {
10671                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10672                 goto err_mdiobus_unreg;
10673         }
10674
10675         ret = hclge_tm_schd_init(hdev);
10676         if (ret) {
10677                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10678                 goto err_mdiobus_unreg;
10679         }
10680
10681         ret = hclge_rss_init_cfg(hdev);
10682         if (ret) {
10683                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10684                 goto err_mdiobus_unreg;
10685         }
10686
10687         ret = hclge_rss_init_hw(hdev);
10688         if (ret) {
10689                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10690                 goto err_mdiobus_unreg;
10691         }
10692
10693         ret = init_mgr_tbl(hdev);
10694         if (ret) {
10695                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10696                 goto err_mdiobus_unreg;
10697         }
10698
10699         ret = hclge_init_fd_config(hdev);
10700         if (ret) {
10701                 dev_err(&pdev->dev,
10702                         "fd table init fail, ret=%d\n", ret);
10703                 goto err_mdiobus_unreg;
10704         }
10705
10706         INIT_KFIFO(hdev->mac_tnl_log);
10707
10708         hclge_dcb_ops_set(hdev);
10709
10710         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10711         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10712
10713         /* Set up affinity after the service timer setup because add_timer_on
10714          * is called in the affinity notify.
10715          */
10716         hclge_misc_affinity_setup(hdev);
10717
10718         hclge_clear_all_event_cause(hdev);
10719         hclge_clear_resetting_state(hdev);
10720
10721         /* Log and clear the hw errors that have already occurred */
10722         hclge_handle_all_hns_hw_errors(ae_dev);
10723
10724         /* request a delayed reset for error recovery, since an immediate
10725          * global reset on this PF would affect other PFs still initializing
10726          */
10727         if (ae_dev->hw_err_reset_req) {
10728                 enum hnae3_reset_type reset_level;
10729
10730                 reset_level = hclge_get_reset_level(ae_dev,
10731                                                     &ae_dev->hw_err_reset_req);
10732                 hclge_set_def_reset_request(ae_dev, reset_level);
10733                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10734         }
10735
10736         /* Enable MISC vector(vector0) */
10737         hclge_enable_vector(&hdev->misc_vector, true);
10738
10739         hclge_state_init(hdev);
10740         hdev->last_reset_time = jiffies;
10741
10742         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10743                  HCLGE_DRIVER_NAME);
10744
10745         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10746
10747         return 0;
10748
10749 err_mdiobus_unreg:
10750         if (hdev->hw.mac.phydev)
10751                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10752 err_msi_irq_uninit:
10753         hclge_misc_irq_uninit(hdev);
10754 err_msi_uninit:
10755         pci_free_irq_vectors(pdev);
10756 err_cmd_uninit:
10757         hclge_cmd_uninit(hdev);
10758 err_pci_uninit:
10759         pcim_iounmap(pdev, hdev->hw.io_base);
10760         pci_clear_master(pdev);
10761         pci_release_regions(pdev);
10762         pci_disable_device(pdev);
10763 out:
10764         mutex_destroy(&hdev->vport_lock);
10765         return ret;
10766 }
10767
10768 static void hclge_stats_clear(struct hclge_dev *hdev)
10769 {
10770         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10771 }
10772
10773 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10774 {
10775         return hclge_config_switch_param(hdev, vf, enable,
10776                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10777 }
10778
10779 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10780 {
10781         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10782                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10783                                           enable, vf);
10784 }
10785
10786 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10787 {
10788         int ret;
10789
10790         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10791         if (ret) {
10792                 dev_err(&hdev->pdev->dev,
10793                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10794                         vf, enable ? "on" : "off", ret);
10795                 return ret;
10796         }
10797
10798         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10799         if (ret)
10800                 dev_err(&hdev->pdev->dev,
10801                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10802                         vf, enable ? "on" : "off", ret);
10803
10804         return ret;
10805 }
10806
10807 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10808                                  bool enable)
10809 {
10810         struct hclge_vport *vport = hclge_get_vport(handle);
10811         struct hclge_dev *hdev = vport->back;
10812         u32 new_spoofchk = enable ? 1 : 0;
10813         int ret;
10814
10815         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10816                 return -EOPNOTSUPP;
10817
10818         vport = hclge_get_vf_vport(hdev, vf);
10819         if (!vport)
10820                 return -EINVAL;
10821
10822         if (vport->vf_info.spoofchk == new_spoofchk)
10823                 return 0;
10824
10825         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10826                 dev_warn(&hdev->pdev->dev,
10827                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10828                          vf);
10829         else if (enable && hclge_is_umv_space_full(vport, true))
10830                 dev_warn(&hdev->pdev->dev,
10831                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10832                          vf);
10833
10834         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10835         if (ret)
10836                 return ret;
10837
10838         vport->vf_info.spoofchk = new_spoofchk;
10839         return 0;
10840 }
10841
10842 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10843 {
10844         struct hclge_vport *vport = hdev->vport;
10845         int ret;
10846         int i;
10847
10848         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10849                 return 0;
10850
10851         /* resume the vf spoof check state after reset */
10852         for (i = 0; i < hdev->num_alloc_vport; i++) {
10853                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10854                                                vport->vf_info.spoofchk);
10855                 if (ret)
10856                         return ret;
10857
10858                 vport++;
10859         }
10860
10861         return 0;
10862 }
10863
10864 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10865 {
10866         struct hclge_vport *vport = hclge_get_vport(handle);
10867         struct hclge_dev *hdev = vport->back;
10868         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10869         u32 new_trusted = enable ? 1 : 0;
10870         bool en_bc_pmc;
10871         int ret;
10872
10873         vport = hclge_get_vf_vport(hdev, vf);
10874         if (!vport)
10875                 return -EINVAL;
10876
10877         if (vport->vf_info.trusted == new_trusted)
10878                 return 0;
10879
10880         /* Disable promisc mode for VF if it is not trusted any more. */
10881         if (!enable && vport->vf_info.promisc_enable) {
10882                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10883                 ret = hclge_set_vport_promisc_mode(vport, false, false,
10884                                                    en_bc_pmc);
10885                 if (ret)
10886                         return ret;
10887                 vport->vf_info.promisc_enable = 0;
10888                 hclge_inform_vf_promisc_info(vport);
10889         }
10890
10891         vport->vf_info.trusted = new_trusted;
10892
10893         return 0;
10894 }
10895
10896 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10897 {
10898         int ret;
10899         int vf;
10900
10901         /* reset vf rate to default value */
10902         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10903                 struct hclge_vport *vport = &hdev->vport[vf];
10904
10905                 vport->vf_info.max_tx_rate = 0;
10906                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10907                 if (ret)
10908                         dev_err(&hdev->pdev->dev,
10909                                 "vf%d failed to reset to default, ret=%d\n",
10910                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10911         }
10912 }
10913
10914 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
10915                                      int min_tx_rate, int max_tx_rate)
10916 {
10917         if (min_tx_rate != 0 ||
10918             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10919                 dev_err(&hdev->pdev->dev,
10920                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10921                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10922                 return -EINVAL;
10923         }
10924
10925         return 0;
10926 }
10927
10928 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10929                              int min_tx_rate, int max_tx_rate, bool force)
10930 {
10931         struct hclge_vport *vport = hclge_get_vport(handle);
10932         struct hclge_dev *hdev = vport->back;
10933         int ret;
10934
10935         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
10936         if (ret)
10937                 return ret;
10938
10939         vport = hclge_get_vf_vport(hdev, vf);
10940         if (!vport)
10941                 return -EINVAL;
10942
10943         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10944                 return 0;
10945
10946         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10947         if (ret)
10948                 return ret;
10949
10950         vport->vf_info.max_tx_rate = max_tx_rate;
10951
10952         return 0;
10953 }
10954
10955 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10956 {
10957         struct hnae3_handle *handle = &hdev->vport->nic;
10958         struct hclge_vport *vport;
10959         int ret;
10960         int vf;
10961
10962         /* resume the vf max_tx_rate after reset */
10963         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10964                 vport = hclge_get_vf_vport(hdev, vf);
10965                 if (!vport)
10966                         return -EINVAL;
10967
10968                 /* zero means max rate; after reset, the firmware has already
10969                  * set it to max rate, so just continue.
10970                  */
10971                 if (!vport->vf_info.max_tx_rate)
10972                         continue;
10973
10974                 ret = hclge_set_vf_rate(handle, vf, 0,
10975                                         vport->vf_info.max_tx_rate, true);
10976                 if (ret) {
10977                         dev_err(&hdev->pdev->dev,
10978                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
10979                                 vf, vport->vf_info.max_tx_rate, ret);
10980                         return ret;
10981                 }
10982         }
10983
10984         return 0;
10985 }
10986
10987 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10988 {
10989         struct hclge_vport *vport = hdev->vport;
10990         int i;
10991
10992         for (i = 0; i < hdev->num_alloc_vport; i++) {
10993                 hclge_vport_stop(vport);
10994                 vport++;
10995         }
10996 }
10997
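/* Rebuild the hardware state after a reset: re-init the command queue,
 * TQP mapping, MAC, TSO/GRO, VLAN, TM, RSS and flow director config, then
 * restore the per-vport state, spoof check and VF rate limits.
 */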
10998 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10999 {
11000         struct hclge_dev *hdev = ae_dev->priv;
11001         struct pci_dev *pdev = ae_dev->pdev;
11002         int ret;
11003
11004         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11005
11006         hclge_stats_clear(hdev);
11007         /* NOTE: pf reset doesn't need to clear or restore pf and vf table
11008          * entries, so the tables in memory should not be cleaned here.
11009          */
11010         if (hdev->reset_type == HNAE3_IMP_RESET ||
11011             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11012                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11013                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11014                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11015                 hclge_reset_umv_space(hdev);
11016         }
11017
11018         ret = hclge_cmd_init(hdev);
11019         if (ret) {
11020                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11021                 return ret;
11022         }
11023
11024         ret = hclge_map_tqp(hdev);
11025         if (ret) {
11026                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11027                 return ret;
11028         }
11029
11030         ret = hclge_mac_init(hdev);
11031         if (ret) {
11032                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11033                 return ret;
11034         }
11035
11036         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11037         if (ret) {
11038                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11039                 return ret;
11040         }
11041
11042         ret = hclge_config_gro(hdev, true);
11043         if (ret)
11044                 return ret;
11045
11046         ret = hclge_init_vlan_config(hdev);
11047         if (ret) {
11048                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11049                 return ret;
11050         }
11051
11052         ret = hclge_tm_init_hw(hdev, true);
11053         if (ret) {
11054                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11055                 return ret;
11056         }
11057
11058         ret = hclge_rss_init_hw(hdev);
11059         if (ret) {
11060                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11061                 return ret;
11062         }
11063
11064         ret = init_mgr_tbl(hdev);
11065         if (ret) {
11066                 dev_err(&pdev->dev,
11067                         "failed to reinit manager table, ret = %d\n", ret);
11068                 return ret;
11069         }
11070
11071         ret = hclge_init_fd_config(hdev);
11072         if (ret) {
11073                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11074                 return ret;
11075         }
11076
11077         /* Log and clear the hw errors that have already occurred */
11078         hclge_handle_all_hns_hw_errors(ae_dev);
11079
11080         /* Re-enable the hw error interrupts because
11081          * the interrupts get disabled on global reset.
11082          */
11083         ret = hclge_config_nic_hw_error(hdev, true);
11084         if (ret) {
11085                 dev_err(&pdev->dev,
11086                         "fail(%d) to re-enable NIC hw error interrupts\n",
11087                         ret);
11088                 return ret;
11089         }
11090
11091         if (hdev->roce_client) {
11092                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11093                 if (ret) {
11094                         dev_err(&pdev->dev,
11095                                 "fail(%d) to re-enable roce ras interrupts\n",
11096                                 ret);
11097                         return ret;
11098                 }
11099         }
11100
11101         hclge_reset_vport_state(hdev);
11102         ret = hclge_reset_vport_spoofchk(hdev);
11103         if (ret)
11104                 return ret;
11105
11106         ret = hclge_resume_vf_rate(hdev);
11107         if (ret)
11108                 return ret;
11109
11110         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11111                  HCLGE_DRIVER_NAME);
11112
11113         return 0;
11114 }
11115
11116 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11117 {
11118         struct hclge_dev *hdev = ae_dev->priv;
11119         struct hclge_mac *mac = &hdev->hw.mac;
11120
11121         hclge_reset_vf_rate(hdev);
11122         hclge_clear_vf_vlan(hdev);
11123         hclge_misc_affinity_teardown(hdev);
11124         hclge_state_uninit(hdev);
11125         hclge_uninit_mac_table(hdev);
11126
11127         if (mac->phydev)
11128                 mdiobus_unregister(mac->mdio_bus);
11129
11130         /* Disable MISC vector(vector0) */
11131         hclge_enable_vector(&hdev->misc_vector, false);
11132         synchronize_irq(hdev->misc_vector.vector_irq);
11133
11134         /* Disable all hw interrupts */
11135         hclge_config_mac_tnl_int(hdev, false);
11136         hclge_config_nic_hw_error(hdev, false);
11137         hclge_config_rocee_ras_interrupt(hdev, false);
11138
11139         hclge_cmd_uninit(hdev);
11140         hclge_misc_irq_uninit(hdev);
11141         hclge_pci_uninit(hdev);
11142         mutex_destroy(&hdev->vport_lock);
11143         hclge_uninit_vport_vlan_table(hdev);
11144         ae_dev->priv = NULL;
11145 }
11146
11147 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11148 {
11149         struct hclge_vport *vport = hclge_get_vport(handle);
11150         struct hclge_dev *hdev = vport->back;
11151
11152         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11153 }
11154
11155 static void hclge_get_channels(struct hnae3_handle *handle,
11156                                struct ethtool_channels *ch)
11157 {
11158         ch->max_combined = hclge_get_max_channels(handle);
11159         ch->other_count = 1;
11160         ch->max_other = 1;
11161         ch->combined_count = handle->kinfo.rss_size;
11162 }
11163
11164 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11165                                         u16 *alloc_tqps, u16 *max_rss_size)
11166 {
11167         struct hclge_vport *vport = hclge_get_vport(handle);
11168         struct hclge_dev *hdev = vport->back;
11169
11170         *alloc_tqps = vport->alloc_tqps;
11171         *max_rss_size = hdev->pf_rss_size_max;
11172 }
11173
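/* Change the channel count: record the requested rss_size, remap the
 * vport's TQPs, reprogram the RSS TC mode and, unless the user has set the
 * RSS indirection table (rxfh_configured), rebuild it for the new size.
 */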
11174 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11175                               bool rxfh_configured)
11176 {
11177         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11178         struct hclge_vport *vport = hclge_get_vport(handle);
11179         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11180         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11181         struct hclge_dev *hdev = vport->back;
11182         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11183         u16 cur_rss_size = kinfo->rss_size;
11184         u16 cur_tqps = kinfo->num_tqps;
11185         u16 tc_valid[HCLGE_MAX_TC_NUM];
11186         u16 roundup_size;
11187         u32 *rss_indir;
11188         unsigned int i;
11189         int ret;
11190
11191         kinfo->req_rss_size = new_tqps_num;
11192
11193         ret = hclge_tm_vport_map_update(hdev);
11194         if (ret) {
11195                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
11196                 return ret;
11197         }
11198
11199         roundup_size = roundup_pow_of_two(kinfo->rss_size);
11200         roundup_size = ilog2(roundup_size);
11201         /* Set the RSS TC mode according to the new RSS size */
11202         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11203                 tc_valid[i] = 0;
11204
11205                 if (!(hdev->hw_tc_map & BIT(i)))
11206                         continue;
11207
11208                 tc_valid[i] = 1;
11209                 tc_size[i] = roundup_size;
11210                 tc_offset[i] = kinfo->rss_size * i;
11211         }
11212         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11213         if (ret)
11214                 return ret;
11215
11216         /* RSS indirection table has been configured by user */
11217         if (rxfh_configured)
11218                 goto out;
11219
11220         /* Reinitialize the RSS indirection table according to the new RSS size */
11221         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11222                             GFP_KERNEL);
11223         if (!rss_indir)
11224                 return -ENOMEM;
11225
11226         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11227                 rss_indir[i] = i % kinfo->rss_size;
11228
11229         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11230         if (ret)
11231                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11232                         ret);
11233
11234         kfree(rss_indir);
11235
11236 out:
11237         if (!ret)
11238                 dev_info(&hdev->pdev->dev,
11239                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
11240                          cur_rss_size, kinfo->rss_size,
11241                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11242
11243         return ret;
11244 }
11245
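      /* Query how many 32-bit and 64-bit registers the firmware can dump;
       * both counts come back in a single descriptor.
       */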
11246 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11247                               u32 *regs_num_64_bit)
11248 {
11249         struct hclge_desc desc;
11250         u32 total_num;
11251         int ret;
11252
11253         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11254         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11255         if (ret) {
11256                 dev_err(&hdev->pdev->dev,
11257                         "Query register number cmd failed, ret = %d.\n", ret);
11258                 return ret;
11259         }
11260
11261         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11262         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11263
11264         total_num = *regs_num_32_bit + *regs_num_64_bit;
11265         if (!total_num)
11266                 return -EINVAL;
11267
11268         return 0;
11269 }
11270
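      /* Read the 32-bit register dump. In the first BD the leading 8-byte
       * command header is not register data (hence two fewer values), while
       * the following BDs are copied in full.
       */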
11271 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11272                                  void *data)
11273 {
11274 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11275 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11276
11277         struct hclge_desc *desc;
11278         u32 *reg_val = data;
11279         __le32 *desc_data;
11280         int nodata_num;
11281         int cmd_num;
11282         int i, k, n;
11283         int ret;
11284
11285         if (regs_num == 0)
11286                 return 0;
11287
11288         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11289         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11290                                HCLGE_32_BIT_REG_RTN_DATANUM);
11291         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11292         if (!desc)
11293                 return -ENOMEM;
11294
11295         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11296         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11297         if (ret) {
11298                 dev_err(&hdev->pdev->dev,
11299                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
11300                 kfree(desc);
11301                 return ret;
11302         }
11303
11304         for (i = 0; i < cmd_num; i++) {
11305                 if (i == 0) {
11306                         desc_data = (__le32 *)(&desc[i].data[0]);
11307                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11308                 } else {
11309                         desc_data = (__le32 *)(&desc[i]);
11310                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
11311                 }
11312                 for (k = 0; k < n; k++) {
11313                         *reg_val++ = le32_to_cpu(*desc_data++);
11314
11315                         regs_num--;
11316                         if (!regs_num)
11317                                 break;
11318                 }
11319         }
11320
11321         kfree(desc);
11322         return 0;
11323 }
11324
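      /* Read the 64-bit register dump; same scheme as the 32-bit dump, with
       * the first BD's command header counting as one reserved 64-bit word.
       */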
11325 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11326                                  void *data)
11327 {
11328 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11329 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11330
11331         struct hclge_desc *desc;
11332         u64 *reg_val = data;
11333         __le64 *desc_data;
11334         int nodata_len;
11335         int cmd_num;
11336         int i, k, n;
11337         int ret;
11338
11339         if (regs_num == 0)
11340                 return 0;
11341
11342         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11343         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11344                                HCLGE_64_BIT_REG_RTN_DATANUM);
11345         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11346         if (!desc)
11347                 return -ENOMEM;
11348
11349         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11350         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11351         if (ret) {
11352                 dev_err(&hdev->pdev->dev,
11353                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
11354                 kfree(desc);
11355                 return ret;
11356         }
11357
11358         for (i = 0; i < cmd_num; i++) {
11359                 if (i == 0) {
11360                         desc_data = (__le64 *)(&desc[i].data[0]);
11361                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11362                 } else {
11363                         desc_data = (__le64 *)(&desc[i]);
11364                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
11365                 }
11366                 for (k = 0; k < n; k++) {
11367                         *reg_val++ = le64_to_cpu(*desc_data++);
11368
11369                         regs_num--;
11370                         if (!regs_num)
11371                                 break;
11372                 }
11373         }
11374
11375         kfree(desc);
11376         return 0;
11377 }
11378
11379 #define MAX_SEPARATE_NUM        4
11380 #define SEPARATOR_VALUE         0xFDFCFBFA
11381 #define REG_NUM_PER_LINE        4
11382 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
11383 #define REG_SEPARATOR_LINE      1
11384 #define REG_NUM_REMAIN_MASK     3
11385 #define BD_LIST_MAX_NUM         30
11386
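      /* Query the BD count needed for each DFX register type: chain
       * HCLGE_GET_DFX_REG_TYPE_CNT descriptors with the NEXT flag and send
       * them as one command.
       */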
11387 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11388 {
11389         int i;
11390
11391         /* initialize command BDs except the last one */
11392         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11393                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11394                                            true);
11395                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11396         }
11397
11398         /* initialize the last command BD */
11399         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11400
11401         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11402 }
11403
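      /* Convert the BD-number query result into a per-type list, using the
       * offsets in hclge_dfx_bd_offset_list to locate each type's count in
       * the returned descriptors.
       */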
11404 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11405                                     int *bd_num_list,
11406                                     u32 type_num)
11407 {
11408         u32 entries_per_desc, desc_index, index, offset, i;
11409         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11410         int ret;
11411
11412         ret = hclge_query_bd_num_cmd_send(hdev, desc);
11413         if (ret) {
11414                 dev_err(&hdev->pdev->dev,
11415                         "Get dfx bd num fail, status is %d.\n", ret);
11416                 return ret;
11417         }
11418
11419         entries_per_desc = ARRAY_SIZE(desc[0].data);
11420         for (i = 0; i < type_num; i++) {
11421                 offset = hclge_dfx_bd_offset_list[i];
11422                 index = offset % entries_per_desc;
11423                 desc_index = offset / entries_per_desc;
11424                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11425         }
11426
11427         return ret;
11428 }
11429
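      /* Send one chained read of bd_num descriptors for a single DFX
       * register type; every descriptor but the last carries the NEXT flag.
       */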
11430 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11431                                   struct hclge_desc *desc_src, int bd_num,
11432                                   enum hclge_opcode_type cmd)
11433 {
11434         struct hclge_desc *desc = desc_src;
11435         int i, ret;
11436
11437         hclge_cmd_setup_basic_desc(desc, cmd, true);
11438         for (i = 0; i < bd_num - 1; i++) {
11439                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11440                 desc++;
11441                 hclge_cmd_setup_basic_desc(desc, cmd, true);
11442         }
11443
11444         desc = desc_src;
11445         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11446         if (ret)
11447                 dev_err(&hdev->pdev->dev,
11448                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11449                         cmd, ret);
11450
11451         return ret;
11452 }
11453
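      /* Copy the register values out of the descriptors and pad the block
       * with one to four SEPARATOR_VALUE words so it ends on a dump-line
       * boundary; returns the number of 32-bit words written.
       */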
11454 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11455                                     void *data)
11456 {
11457         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11458         struct hclge_desc *desc = desc_src;
11459         u32 *reg = data;
11460
11461         entries_per_desc = ARRAY_SIZE(desc->data);
11462         reg_num = entries_per_desc * bd_num;
11463         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11464         for (i = 0; i < reg_num; i++) {
11465                 index = i % entries_per_desc;
11466                 desc_index = i / entries_per_desc;
11467                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11468         }
11469         for (i = 0; i < separator_num; i++)
11470                 *reg++ = SEPARATOR_VALUE;
11471
11472         return reg_num + separator_num;
11473 }
11474
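      /* Compute the DFX dump size in bytes: each register type contributes
       * bd_num BDs of data, rounded up to whole dump lines with room for a
       * separator line.
       */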
11475 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11476 {
11477         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11478         int data_len_per_desc, bd_num, i;
11479         int bd_num_list[BD_LIST_MAX_NUM];
11480         u32 data_len;
11481         int ret;
11482
11483         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11484         if (ret) {
11485                 dev_err(&hdev->pdev->dev,
11486                         "Get dfx reg bd num fail, status is %d.\n", ret);
11487                 return ret;
11488         }
11489
11490         data_len_per_desc = sizeof_field(struct hclge_desc, data);
11491         *len = 0;
11492         for (i = 0; i < dfx_reg_type_num; i++) {
11493                 bd_num = bd_num_list[i];
11494                 data_len = data_len_per_desc * bd_num;
11495                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11496         }
11497
11498         return ret;
11499 }
11500
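      /* Dump all DFX register types: query the per-type BD counts, allocate
       * a descriptor buffer big enough for the largest type, then read and
       * copy each type in turn.
       */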
11501 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11502 {
11503         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11504         int bd_num, bd_num_max, buf_len, i;
11505         int bd_num_list[BD_LIST_MAX_NUM];
11506         struct hclge_desc *desc_src;
11507         u32 *reg = data;
11508         int ret;
11509
11510         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11511         if (ret) {
11512                 dev_err(&hdev->pdev->dev,
11513                         "Get dfx reg bd num fail, status is %d.\n", ret);
11514                 return ret;
11515         }
11516
11517         bd_num_max = bd_num_list[0];
11518         for (i = 1; i < dfx_reg_type_num; i++)
11519                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11520
11521         buf_len = sizeof(*desc_src) * bd_num_max;
11522         desc_src = kzalloc(buf_len, GFP_KERNEL);
11523         if (!desc_src)
11524                 return -ENOMEM;
11525
11526         for (i = 0; i < dfx_reg_type_num; i++) {
11527                 bd_num = bd_num_list[i];
11528                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11529                                              hclge_dfx_reg_opcode_list[i]);
11530                 if (ret) {
11531                         dev_err(&hdev->pdev->dev,
11532                                 "Get dfx reg fail, status is %d.\n", ret);
11533                         break;
11534                 }
11535
11536                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11537         }
11538
11539         kfree(desc_src);
11540         return ret;
11541 }
11542
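      /* Read the directly accessible PF registers (cmdq, common, per-ring
       * and per-vector interrupt registers) from the PCIe register space,
       * appending SEPARATOR_VALUE padding after each block; returns the
       * number of 32-bit words written.
       */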
11543 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11544                               struct hnae3_knic_private_info *kinfo)
11545 {
11546 #define HCLGE_RING_REG_OFFSET           0x200
11547 #define HCLGE_RING_INT_REG_OFFSET       0x4
11548
11549         int i, j, reg_num, separator_num;
11550         int data_num_sum;
11551         u32 *reg = data;
11552
11553         /* fetch per-PF register values from the PF PCIe register space */
11554         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11555         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11556         for (i = 0; i < reg_num; i++)
11557                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11558         for (i = 0; i < separator_num; i++)
11559                 *reg++ = SEPARATOR_VALUE;
11560         data_num_sum = reg_num + separator_num;
11561
11562         reg_num = ARRAY_SIZE(common_reg_addr_list);
11563         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11564         for (i = 0; i < reg_num; i++)
11565                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11566         for (i = 0; i < separator_num; i++)
11567                 *reg++ = SEPARATOR_VALUE;
11568         data_num_sum += reg_num + separator_num;
11569
11570         reg_num = ARRAY_SIZE(ring_reg_addr_list);
11571         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11572         for (j = 0; j < kinfo->num_tqps; j++) {
11573                 for (i = 0; i < reg_num; i++)
11574                         *reg++ = hclge_read_dev(&hdev->hw,
11575                                                 ring_reg_addr_list[i] +
11576                                                 HCLGE_RING_REG_OFFSET * j);
11577                 for (i = 0; i < separator_num; i++)
11578                         *reg++ = SEPARATOR_VALUE;
11579         }
11580         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11581
11582         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11583         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11584         for (j = 0; j < hdev->num_msi_used - 1; j++) {
11585                 for (i = 0; i < reg_num; i++)
11586                         *reg++ = hclge_read_dev(&hdev->hw,
11587                                                 tqp_intr_reg_addr_list[i] +
11588                                                 HCLGE_RING_INT_REG_OFFSET * j);
11589                 for (i = 0; i < separator_num; i++)
11590                         *reg++ = SEPARATOR_VALUE;
11591         }
11592         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11593
11594         return data_num_sum;
11595 }
11596
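      /* ethtool -d, first step: total dump length in bytes, covering the
       * directly read PF registers, the firmware-queried 32/64-bit
       * registers and the DFX registers, each rounded to whole
       * separator-terminated lines.
       */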
11597 static int hclge_get_regs_len(struct hnae3_handle *handle)
11598 {
11599         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11600         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11601         struct hclge_vport *vport = hclge_get_vport(handle);
11602         struct hclge_dev *hdev = vport->back;
11603         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11604         int regs_lines_32_bit, regs_lines_64_bit;
11605         int ret;
11606
11607         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11608         if (ret) {
11609                 dev_err(&hdev->pdev->dev,
11610                         "Get register number failed, ret = %d.\n", ret);
11611                 return ret;
11612         }
11613
11614         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11615         if (ret) {
11616                 dev_err(&hdev->pdev->dev,
11617                         "Get dfx reg len failed, ret = %d.\n", ret);
11618                 return ret;
11619         }
11620
11621         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11622                 REG_SEPARATOR_LINE;
11623         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11624                 REG_SEPARATOR_LINE;
11625         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11626                 REG_SEPARATOR_LINE;
11627         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11628                 REG_SEPARATOR_LINE;
11629         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11630                 REG_SEPARATOR_LINE;
11631         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11632                 REG_SEPARATOR_LINE;
11633
11634         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11635                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11636                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11637 }
11638
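      /* ethtool -d, second step: fill the dump in the same order used to
       * size it: PF PCIe registers, 32-bit registers, 64-bit registers and
       * finally the DFX registers, with separator padding between blocks.
       */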
11639 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11640                            void *data)
11641 {
11642         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11643         struct hclge_vport *vport = hclge_get_vport(handle);
11644         struct hclge_dev *hdev = vport->back;
11645         u32 regs_num_32_bit, regs_num_64_bit;
11646         int i, reg_num, separator_num, ret;
11647         u32 *reg = data;
11648
11649         *version = hdev->fw_version;
11650
11651         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11652         if (ret) {
11653                 dev_err(&hdev->pdev->dev,
11654                         "Get register number failed, ret = %d.\n", ret);
11655                 return;
11656         }
11657
11658         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11659
11660         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11661         if (ret) {
11662                 dev_err(&hdev->pdev->dev,
11663                         "Get 32 bit register failed, ret = %d.\n", ret);
11664                 return;
11665         }
11666         reg_num = regs_num_32_bit;
11667         reg += reg_num;
11668         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11669         for (i = 0; i < separator_num; i++)
11670                 *reg++ = SEPARATOR_VALUE;
11671
11672         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11673         if (ret) {
11674                 dev_err(&hdev->pdev->dev,
11675                         "Get 64 bit register failed, ret = %d.\n", ret);
11676                 return;
11677         }
11678         reg_num = regs_num_64_bit * 2;
11679         reg += reg_num;
11680         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11681         for (i = 0; i < separator_num; i++)
11682                 *reg++ = SEPARATOR_VALUE;
11683
11684         ret = hclge_get_dfx_reg(hdev, reg);
11685         if (ret)
11686                 dev_err(&hdev->pdev->dev,
11687                         "Get dfx register failed, ret = %d.\n", ret);
11688 }
11689
11690 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11691 {
11692         struct hclge_set_led_state_cmd *req;
11693         struct hclge_desc desc;
11694         int ret;
11695
11696         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11697
11698         req = (struct hclge_set_led_state_cmd *)desc.data;
11699         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11700                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11701
11702         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11703         if (ret)
11704                 dev_err(&hdev->pdev->dev,
11705                         "Send set led state cmd error, ret = %d\n", ret);
11706
11707         return ret;
11708 }
11709
11710 enum hclge_led_status {
11711         HCLGE_LED_OFF,
11712         HCLGE_LED_ON,
11713         HCLGE_LED_NO_CHANGE = 0xFF,
11714 };
11715
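      /* ethtool -p: switch the locate LED on or off so the port can be
       * identified physically.
       */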
11716 static int hclge_set_led_id(struct hnae3_handle *handle,
11717                             enum ethtool_phys_id_state status)
11718 {
11719         struct hclge_vport *vport = hclge_get_vport(handle);
11720         struct hclge_dev *hdev = vport->back;
11721
11722         switch (status) {
11723         case ETHTOOL_ID_ACTIVE:
11724                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11725         case ETHTOOL_ID_INACTIVE:
11726                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11727         default:
11728                 return -EINVAL;
11729         }
11730 }
11731
11732 static void hclge_get_link_mode(struct hnae3_handle *handle,
11733                                 unsigned long *supported,
11734                                 unsigned long *advertising)
11735 {
11736         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11737         struct hclge_vport *vport = hclge_get_vport(handle);
11738         struct hclge_dev *hdev = vport->back;
11739         unsigned int idx = 0;
11740
11741         for (; idx < size; idx++) {
11742                 supported[idx] = hdev->hw.mac.supported[idx];
11743                 advertising[idx] = hdev->hw.mac.advertising[idx];
11744         }
11745 }
11746
11747 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11748 {
11749         struct hclge_vport *vport = hclge_get_vport(handle);
11750         struct hclge_dev *hdev = vport->back;
11751
11752         return hclge_config_gro(hdev, enable);
11753 }
11754
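      /* Re-apply the PF's (vport 0) promiscuous configuration when the
       * overflow promisc flags change or an earlier update is still
       * pending, and keep the VLAN filter state in line with the new flags.
       */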
11755 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11756 {
11757         struct hclge_vport *vport = &hdev->vport[0];
11758         struct hnae3_handle *handle = &vport->nic;
11759         u8 tmp_flags;
11760         int ret;
11761
11762         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11763                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11764                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11765         }
11766
11767         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11768                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11769                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11770                                              tmp_flags & HNAE3_MPE);
11771                 if (!ret) {
11772                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11773                         hclge_enable_vlan_filter(handle,
11774                                                  tmp_flags & HNAE3_VLAN_FLTR);
11775                 }
11776         }
11777 }
11778
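      /* Ask the firmware whether an SFP/transceiver module is plugged in. */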
11779 static bool hclge_module_existed(struct hclge_dev *hdev)
11780 {
11781         struct hclge_desc desc;
11782         u32 existed;
11783         int ret;
11784
11785         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11786         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11787         if (ret) {
11788                 dev_err(&hdev->pdev->dev,
11789                         "failed to get SFP exist state, ret = %d\n", ret);
11790                 return false;
11791         }
11792
11793         existed = le32_to_cpu(desc.data[0]);
11794
11795         return existed != 0;
11796 }
11797
11798 /* One read needs 6 BDs (140 bytes of data in total);
11799  * return the number of bytes actually read, 0 means the read failed.
11800  */
11801 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11802                                      u32 len, u8 *data)
11803 {
11804         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11805         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11806         u16 read_len;
11807         u16 copy_len;
11808         int ret;
11809         int i;
11810
11811         /* setup all 6 bds to read module eeprom info. */
11812         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11813                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11814                                            true);
11815
11816                 /* bd0~bd4 need next flag */
11817                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11818                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11819         }
11820
11821         /* setup bd0, which contains the offset and read length. */
11822         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11823         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11824         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11825         sfp_info_bd0->read_len = cpu_to_le16(read_len);
11826
11827         ret = hclge_cmd_send(&hdev->hw, desc, i);
11828         if (ret) {
11829                 dev_err(&hdev->pdev->dev,
11830                         "failed to get SFP eeprom info, ret = %d\n", ret);
11831                 return 0;
11832         }
11833
11834         /* copy sfp info from bd0 to out buffer. */
11835         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11836         memcpy(data, sfp_info_bd0->data, copy_len);
11837         read_len = copy_len;
11838
11839         /* copy sfp info from bd1~bd5 to out buffer if needed. */
11840         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11841                 if (read_len >= len)
11842                         return read_len;
11843
11844                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11845                 memcpy(data + read_len, desc[i].data, copy_len);
11846                 read_len += copy_len;
11847         }
11848
11849         return read_len;
11850 }
11851
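      /* ethtool -m: module EEPROM reads are only supported on fibre ports;
       * loop over hclge_get_sfp_eeprom_info() until the requested length
       * has been read.
       */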
11852 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11853                                    u32 len, u8 *data)
11854 {
11855         struct hclge_vport *vport = hclge_get_vport(handle);
11856         struct hclge_dev *hdev = vport->back;
11857         u32 read_len = 0;
11858         u16 data_len;
11859
11860         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11861                 return -EOPNOTSUPP;
11862
11863         if (!hclge_module_existed(hdev))
11864                 return -ENXIO;
11865
11866         while (read_len < len) {
11867                 data_len = hclge_get_sfp_eeprom_info(hdev,
11868                                                      offset + read_len,
11869                                                      len - read_len,
11870                                                      data + read_len);
11871                 if (!data_len)
11872                         return -EIO;
11873
11874                 read_len += data_len;
11875         }
11876
11877         return 0;
11878 }
11879
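      /* hnae3 AE operations exported by the PF (hclge) driver. */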
11880 static const struct hnae3_ae_ops hclge_ops = {
11881         .init_ae_dev = hclge_init_ae_dev,
11882         .uninit_ae_dev = hclge_uninit_ae_dev,
11883         .flr_prepare = hclge_flr_prepare,
11884         .flr_done = hclge_flr_done,
11885         .init_client_instance = hclge_init_client_instance,
11886         .uninit_client_instance = hclge_uninit_client_instance,
11887         .map_ring_to_vector = hclge_map_ring_to_vector,
11888         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11889         .get_vector = hclge_get_vector,
11890         .put_vector = hclge_put_vector,
11891         .set_promisc_mode = hclge_set_promisc_mode,
11892         .request_update_promisc_mode = hclge_request_update_promisc_mode,
11893         .set_loopback = hclge_set_loopback,
11894         .start = hclge_ae_start,
11895         .stop = hclge_ae_stop,
11896         .client_start = hclge_client_start,
11897         .client_stop = hclge_client_stop,
11898         .get_status = hclge_get_status,
11899         .get_ksettings_an_result = hclge_get_ksettings_an_result,
11900         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11901         .get_media_type = hclge_get_media_type,
11902         .check_port_speed = hclge_check_port_speed,
11903         .get_fec = hclge_get_fec,
11904         .set_fec = hclge_set_fec,
11905         .get_rss_key_size = hclge_get_rss_key_size,
11906         .get_rss = hclge_get_rss,
11907         .set_rss = hclge_set_rss,
11908         .set_rss_tuple = hclge_set_rss_tuple,
11909         .get_rss_tuple = hclge_get_rss_tuple,
11910         .get_tc_size = hclge_get_tc_size,
11911         .get_mac_addr = hclge_get_mac_addr,
11912         .set_mac_addr = hclge_set_mac_addr,
11913         .do_ioctl = hclge_do_ioctl,
11914         .add_uc_addr = hclge_add_uc_addr,
11915         .rm_uc_addr = hclge_rm_uc_addr,
11916         .add_mc_addr = hclge_add_mc_addr,
11917         .rm_mc_addr = hclge_rm_mc_addr,
11918         .set_autoneg = hclge_set_autoneg,
11919         .get_autoneg = hclge_get_autoneg,
11920         .restart_autoneg = hclge_restart_autoneg,
11921         .halt_autoneg = hclge_halt_autoneg,
11922         .get_pauseparam = hclge_get_pauseparam,
11923         .set_pauseparam = hclge_set_pauseparam,
11924         .set_mtu = hclge_set_mtu,
11925         .reset_queue = hclge_reset_tqp,
11926         .get_stats = hclge_get_stats,
11927         .get_mac_stats = hclge_get_mac_stat,
11928         .update_stats = hclge_update_stats,
11929         .get_strings = hclge_get_strings,
11930         .get_sset_count = hclge_get_sset_count,
11931         .get_fw_version = hclge_get_fw_version,
11932         .get_mdix_mode = hclge_get_mdix_mode,
11933         .enable_vlan_filter = hclge_enable_vlan_filter,
11934         .set_vlan_filter = hclge_set_vlan_filter,
11935         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11936         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11937         .reset_event = hclge_reset_event,
11938         .get_reset_level = hclge_get_reset_level,
11939         .set_default_reset_request = hclge_set_def_reset_request,
11940         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11941         .set_channels = hclge_set_channels,
11942         .get_channels = hclge_get_channels,
11943         .get_regs_len = hclge_get_regs_len,
11944         .get_regs = hclge_get_regs,
11945         .set_led_id = hclge_set_led_id,
11946         .get_link_mode = hclge_get_link_mode,
11947         .add_fd_entry = hclge_add_fd_entry,
11948         .del_fd_entry = hclge_del_fd_entry,
11949         .del_all_fd_entries = hclge_del_all_fd_entries,
11950         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11951         .get_fd_rule_info = hclge_get_fd_rule_info,
11952         .get_fd_all_rules = hclge_get_all_rules,
11953         .enable_fd = hclge_enable_fd,
11954         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
11955         .dbg_run_cmd = hclge_dbg_run_cmd,
11956         .dbg_read_cmd = hclge_dbg_read_cmd,
11957         .handle_hw_ras_error = hclge_handle_hw_ras_error,
11958         .get_hw_reset_stat = hclge_get_hw_reset_stat,
11959         .ae_dev_resetting = hclge_ae_dev_resetting,
11960         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11961         .set_gro_en = hclge_gro_en,
11962         .get_global_queue_id = hclge_covert_handle_qid_global,
11963         .set_timer_task = hclge_set_timer_task,
11964         .mac_connect_phy = hclge_mac_connect_phy,
11965         .mac_disconnect_phy = hclge_mac_disconnect_phy,
11966         .get_vf_config = hclge_get_vf_config,
11967         .set_vf_link_state = hclge_set_vf_link_state,
11968         .set_vf_spoofchk = hclge_set_vf_spoofchk,
11969         .set_vf_trust = hclge_set_vf_trust,
11970         .set_vf_rate = hclge_set_vf_rate,
11971         .set_vf_mac = hclge_set_vf_mac,
11972         .get_module_eeprom = hclge_get_module_eeprom,
11973         .get_cmdq_stat = hclge_get_cmdq_stat,
11974         .add_cls_flower = hclge_add_cls_flower,
11975         .del_cls_flower = hclge_del_cls_flower,
11976         .cls_flower_active = hclge_is_cls_flower_active,
11977 };
11978
11979 static struct hnae3_ae_algo ae_algo = {
11980         .ops = &hclge_ops,
11981         .pdev_id_table = ae_algo_pci_tbl,
11982 };
11983
11984 static int hclge_init(void)
11985 {
11986         pr_info("%s is initializing\n", HCLGE_NAME);
11987
11988         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11989         if (!hclge_wq) {
11990                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11991                 return -ENOMEM;
11992         }
11993
11994         hnae3_register_ae_algo(&ae_algo);
11995
11996         return 0;
11997 }
11998
11999 static void hclge_exit(void)
12000 {
12001         hnae3_unregister_ae_algo(&ae_algo);
12002         destroy_workqueue(hclge_wq);
12003 }
12004 module_init(hclge_init);
12005 module_exit(hclge_exit);
12006
12007 MODULE_LICENSE("GPL");
12008 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12009 MODULE_DESCRIPTION("HCLGE Driver");
12010 MODULE_VERSION(HCLGE_MOD_VERSION);