net: hns3: add support for imp-controlled PHYs
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26
27 #define HCLGE_NAME                      "hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
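/* HCLGE_STATS_READ() reads a u64 counter located @offset bytes into the
 * structure pointed to by @p; HCLGE_MAC_STATS_FIELD_OFF() produces that
 * offset from a field name in struct hclge_mac_stats. The two are used
 * together by hclge_comm_get_stats() below.
 */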
30
31 #define HCLGE_BUF_SIZE_UNIT     256U
32 #define HCLGE_BUF_MUL_BY        2
33 #define HCLGE_BUF_DIV_BY        2
34 #define NEED_RESERVE_TC_NUM     2
35 #define BUF_MAX_PERCENT         100
36 #define BUF_RESERVE_PERCENT     90
37
38 #define HCLGE_RESET_MAX_FAIL_CNT        5
39 #define HCLGE_RESET_SYNC_TIME           100
40 #define HCLGE_PF_RESET_SYNC_TIME        20
41 #define HCLGE_PF_RESET_SYNC_CNT         1500
42
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56
57 #define HCLGE_LINK_STATUS_MS    10
58
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67                                                    unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73
74 static struct hnae3_ae_algo ae_algo;
75
76 static struct workqueue_struct *hclge_wq;
77
78 static const struct pci_device_id ae_algo_pci_tbl[] = {
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
87         /* required last entry */
88         {0, }
89 };
90
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94                                          HCLGE_CMDQ_TX_ADDR_H_REG,
95                                          HCLGE_CMDQ_TX_DEPTH_REG,
96                                          HCLGE_CMDQ_TX_TAIL_REG,
97                                          HCLGE_CMDQ_TX_HEAD_REG,
98                                          HCLGE_CMDQ_RX_ADDR_L_REG,
99                                          HCLGE_CMDQ_RX_ADDR_H_REG,
100                                          HCLGE_CMDQ_RX_DEPTH_REG,
101                                          HCLGE_CMDQ_RX_TAIL_REG,
102                                          HCLGE_CMDQ_RX_HEAD_REG,
103                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
104                                          HCLGE_CMDQ_INTR_STS_REG,
105                                          HCLGE_CMDQ_INTR_EN_REG,
106                                          HCLGE_CMDQ_INTR_GEN_REG};
107
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109                                            HCLGE_VECTOR0_OTER_EN_REG,
110                                            HCLGE_MISC_RESET_STS_REG,
111                                            HCLGE_MISC_VECTOR_INT_STS,
112                                            HCLGE_GLOBAL_RESET_REG,
113                                            HCLGE_FUN_RST_ING,
114                                            HCLGE_GRO_EN_REG};
115
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117                                          HCLGE_RING_RX_ADDR_H_REG,
118                                          HCLGE_RING_RX_BD_NUM_REG,
119                                          HCLGE_RING_RX_BD_LENGTH_REG,
120                                          HCLGE_RING_RX_MERGE_EN_REG,
121                                          HCLGE_RING_RX_TAIL_REG,
122                                          HCLGE_RING_RX_HEAD_REG,
123                                          HCLGE_RING_RX_FBD_NUM_REG,
124                                          HCLGE_RING_RX_OFFSET_REG,
125                                          HCLGE_RING_RX_FBD_OFFSET_REG,
126                                          HCLGE_RING_RX_STASH_REG,
127                                          HCLGE_RING_RX_BD_ERR_REG,
128                                          HCLGE_RING_TX_ADDR_L_REG,
129                                          HCLGE_RING_TX_ADDR_H_REG,
130                                          HCLGE_RING_TX_BD_NUM_REG,
131                                          HCLGE_RING_TX_PRIORITY_REG,
132                                          HCLGE_RING_TX_TC_REG,
133                                          HCLGE_RING_TX_MERGE_EN_REG,
134                                          HCLGE_RING_TX_TAIL_REG,
135                                          HCLGE_RING_TX_HEAD_REG,
136                                          HCLGE_RING_TX_FBD_NUM_REG,
137                                          HCLGE_RING_TX_OFFSET_REG,
138                                          HCLGE_RING_TX_EBD_NUM_REG,
139                                          HCLGE_RING_TX_EBD_OFFSET_REG,
140                                          HCLGE_RING_TX_BD_ERR_REG,
141                                          HCLGE_RING_EN_REG};
142
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144                                              HCLGE_TQP_INTR_GL0_REG,
145                                              HCLGE_TQP_INTR_GL1_REG,
146                                              HCLGE_TQP_INTR_GL2_REG,
147                                              HCLGE_TQP_INTR_RL_REG};
148
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150         "App    Loopback test",
151         "Serdes serial Loopback test",
152         "Serdes parallel Loopback test",
153         "Phy    Loopback test"
154 };
155
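/* ethtool statistics table: each entry pairs a reported stat name with the
 * byte offset of the matching counter in struct hclge_mac_stats, resolved
 * via HCLGE_MAC_STATS_FIELD_OFF().
 */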
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157         {"mac_tx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159         {"mac_rx_mac_pause_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161         {"mac_tx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163         {"mac_rx_control_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165         {"mac_tx_pfc_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167         {"mac_tx_pfc_pri0_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169         {"mac_tx_pfc_pri1_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171         {"mac_tx_pfc_pri2_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173         {"mac_tx_pfc_pri3_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175         {"mac_tx_pfc_pri4_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177         {"mac_tx_pfc_pri5_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179         {"mac_tx_pfc_pri6_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181         {"mac_tx_pfc_pri7_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183         {"mac_rx_pfc_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185         {"mac_rx_pfc_pri0_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187         {"mac_rx_pfc_pri1_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189         {"mac_rx_pfc_pri2_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191         {"mac_rx_pfc_pri3_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193         {"mac_rx_pfc_pri4_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195         {"mac_rx_pfc_pri5_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197         {"mac_rx_pfc_pri6_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199         {"mac_rx_pfc_pri7_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201         {"mac_tx_total_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203         {"mac_tx_total_oct_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205         {"mac_tx_good_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207         {"mac_tx_bad_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209         {"mac_tx_good_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211         {"mac_tx_bad_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213         {"mac_tx_uni_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215         {"mac_tx_multi_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217         {"mac_tx_broad_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219         {"mac_tx_undersize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221         {"mac_tx_oversize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223         {"mac_tx_64_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225         {"mac_tx_65_127_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227         {"mac_tx_128_255_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229         {"mac_tx_256_511_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231         {"mac_tx_512_1023_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233         {"mac_tx_1024_1518_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235         {"mac_tx_1519_2047_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237         {"mac_tx_2048_4095_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239         {"mac_tx_4096_8191_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241         {"mac_tx_8192_9216_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243         {"mac_tx_9217_12287_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245         {"mac_tx_12288_16383_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247         {"mac_tx_1519_max_good_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249         {"mac_tx_1519_max_bad_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251         {"mac_rx_total_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253         {"mac_rx_total_oct_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255         {"mac_rx_good_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257         {"mac_rx_bad_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259         {"mac_rx_good_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261         {"mac_rx_bad_oct_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263         {"mac_rx_uni_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265         {"mac_rx_multi_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267         {"mac_rx_broad_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269         {"mac_rx_undersize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271         {"mac_rx_oversize_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273         {"mac_rx_64_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275         {"mac_rx_65_127_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277         {"mac_rx_128_255_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279         {"mac_rx_256_511_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281         {"mac_rx_512_1023_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283         {"mac_rx_1024_1518_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285         {"mac_rx_1519_2047_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287         {"mac_rx_2048_4095_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289         {"mac_rx_4096_8191_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291         {"mac_rx_8192_9216_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293         {"mac_rx_9217_12287_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295         {"mac_rx_12288_16383_oct_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297         {"mac_rx_1519_max_good_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299         {"mac_rx_1519_max_bad_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301
302         {"mac_tx_fragment_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304         {"mac_tx_undermin_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306         {"mac_tx_jabber_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308         {"mac_tx_err_all_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310         {"mac_tx_from_app_good_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312         {"mac_tx_from_app_bad_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314         {"mac_rx_fragment_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316         {"mac_rx_undermin_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318         {"mac_rx_jabber_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320         {"mac_rx_fcs_err_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322         {"mac_rx_send_app_good_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324         {"mac_rx_send_app_bad_pkt_num",
325                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327
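/* Default MAC manager table entry: matches LLDP frames by the nearest-bridge
 * multicast address 01:80:c2:00:00:0e and the ETH_P_LLDP ethertype.
 */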
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329         {
330                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
332                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333                 .i_port_bitmap = 0x1,
334         },
335 };
336
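/* Default RSS hash key; this appears to be the widely used default Toeplitz
 * key shared by many NIC drivers.
 */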
337 static const u8 hclge_hash_key[] = {
338         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344
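/* The two arrays below are parallel: hclge_dfx_bd_offset_list[i] gives the
 * BD number offset for the DFX register group queried with
 * hclge_dfx_reg_opcode_list[i], so their ordering must stay in sync.
 */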
345 static const u32 hclge_dfx_bd_offset_list[] = {
346         HCLGE_DFX_BIOS_BD_OFFSET,
347         HCLGE_DFX_SSU_0_BD_OFFSET,
348         HCLGE_DFX_SSU_1_BD_OFFSET,
349         HCLGE_DFX_IGU_BD_OFFSET,
350         HCLGE_DFX_RPU_0_BD_OFFSET,
351         HCLGE_DFX_RPU_1_BD_OFFSET,
352         HCLGE_DFX_NCSI_BD_OFFSET,
353         HCLGE_DFX_RTC_BD_OFFSET,
354         HCLGE_DFX_PPP_BD_OFFSET,
355         HCLGE_DFX_RCB_BD_OFFSET,
356         HCLGE_DFX_TQP_BD_OFFSET,
357         HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361         HCLGE_OPC_DFX_BIOS_COMMON_REG,
362         HCLGE_OPC_DFX_SSU_REG_0,
363         HCLGE_OPC_DFX_SSU_REG_1,
364         HCLGE_OPC_DFX_IGU_EGU_REG,
365         HCLGE_OPC_DFX_RPU_REG_0,
366         HCLGE_OPC_DFX_RPU_REG_1,
367         HCLGE_OPC_DFX_NCSI_REG,
368         HCLGE_OPC_DFX_RTC_REG,
369         HCLGE_OPC_DFX_PPP_REG,
370         HCLGE_OPC_DFX_RCB_REG,
371         HCLGE_OPC_DFX_TQP_REG,
372         HCLGE_OPC_DFX_SSU_REG_2
373 };
374
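/* Key layout tables used when building flow director lookup keys: each
 * { field, length } entry gives the width in bits of the named meta data or
 * tuple field.
 */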
375 static const struct key_info meta_data_key_info[] = {
376         { PACKET_TYPE_ID, 6},
377         { IP_FRAGEMENT, 1},
378         { ROCE_TYPE, 1},
379         { NEXT_KEY, 5},
380         { VLAN_NUMBER, 2},
381         { SRC_VPORT, 12},
382         { DST_VPORT, 12},
383         { TUNNEL_PACKET, 1},
384 };
385
386 static const struct key_info tuple_key_info[] = {
387         { OUTER_DST_MAC, 48},
388         { OUTER_SRC_MAC, 48},
389         { OUTER_VLAN_TAG_FST, 16},
390         { OUTER_VLAN_TAG_SEC, 16},
391         { OUTER_ETH_TYPE, 16},
392         { OUTER_L2_RSV, 16},
393         { OUTER_IP_TOS, 8},
394         { OUTER_IP_PROTO, 8},
395         { OUTER_SRC_IP, 32},
396         { OUTER_DST_IP, 32},
397         { OUTER_L3_RSV, 16},
398         { OUTER_SRC_PORT, 16},
399         { OUTER_DST_PORT, 16},
400         { OUTER_L4_RSV, 32},
401         { OUTER_TUN_VNI, 24},
402         { OUTER_TUN_FLOW_ID, 8},
403         { INNER_DST_MAC, 48},
404         { INNER_SRC_MAC, 48},
405         { INNER_VLAN_TAG_FST, 16},
406         { INNER_VLAN_TAG_SEC, 16},
407         { INNER_ETH_TYPE, 16},
408         { INNER_L2_RSV, 16},
409         { INNER_IP_TOS, 8},
410         { INNER_IP_PROTO, 8},
411         { INNER_SRC_IP, 32},
412         { INNER_DST_IP, 32},
413         { INNER_L3_RSV, 16},
414         { INNER_SRC_PORT, 16},
415         { INNER_DST_PORT, 16},
416         { INNER_L4_RSV, 32},
417 };
418
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422
423         u64 *data = (u64 *)(&hdev->mac_stats);
424         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425         __le64 *desc_data;
426         int i, k, n;
427         int ret;
428
429         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431         if (ret) {
432                 dev_err(&hdev->pdev->dev,
433                         "Get MAC pkt stats fail, status = %d.\n", ret);
434
435                 return ret;
436         }
437
438         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439                 /* for special opcode 0032, only the first desc has the head */
440                 if (unlikely(i == 0)) {
441                         desc_data = (__le64 *)(&desc[i].data[0]);
442                         n = HCLGE_RD_FIRST_STATS_NUM;
443                 } else {
444                         desc_data = (__le64 *)(&desc[i]);
445                         n = HCLGE_RD_OTHER_STATS_NUM;
446                 }
447
448                 for (k = 0; k < n; k++) {
449                         *data += le64_to_cpu(*desc_data);
450                         data++;
451                         desc_data++;
452                 }
453         }
454
455         return 0;
456 }
457
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460         u64 *data = (u64 *)(&hdev->mac_stats);
461         struct hclge_desc *desc;
462         __le64 *desc_data;
463         u16 i, k, n;
464         int ret;
465
466         /* This may be called inside atomic sections,
467          * so GFP_ATOMIC is more suitable here.
468          */
469         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470         if (!desc)
471                 return -ENOMEM;
472
473         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475         if (ret) {
476                 kfree(desc);
477                 return ret;
478         }
479
480         for (i = 0; i < desc_num; i++) {
481                 /* for special opcode 0034, only the first desc has the head */
482                 if (i == 0) {
483                         desc_data = (__le64 *)(&desc[i].data[0]);
484                         n = HCLGE_RD_FIRST_STATS_NUM;
485                 } else {
486                         desc_data = (__le64 *)(&desc[i]);
487                         n = HCLGE_RD_OTHER_STATS_NUM;
488                 }
489
490                 for (k = 0; k < n; k++) {
491                         *data += le64_to_cpu(*desc_data);
492                         data++;
493                         desc_data++;
494                 }
495         }
496
497         kfree(desc);
498
499         return 0;
500 }
501
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504         struct hclge_desc desc;
505         __le32 *desc_data;
506         u32 reg_num;
507         int ret;
508
509         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511         if (ret)
512                 return ret;
513
514         desc_data = (__le32 *)(&desc.data[0]);
515         reg_num = le32_to_cpu(*desc_data);
516
517         *desc_num = 1 + ((reg_num - 3) >> 2) +
518                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
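        /* i.e. desc_num = 1 + roundup(reg_num - 3, 4) / 4; for example,
         * reg_num = 21 gives 1 + 4 + 1 = 6 descriptors.
         */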
519
520         return 0;
521 }
522
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525         u32 desc_num;
526         int ret;
527
528         ret = hclge_mac_query_reg_num(hdev, &desc_num);
529
530         /* The firmware supports the new statistics acquisition method */
531         if (!ret)
532                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
533         else if (ret == -EOPNOTSUPP)
534                 ret = hclge_mac_update_stats_defective(hdev);
535         else
536                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537
538         return ret;
539 }
540
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544         struct hclge_vport *vport = hclge_get_vport(handle);
545         struct hclge_dev *hdev = vport->back;
546         struct hnae3_queue *queue;
547         struct hclge_desc desc[1];
548         struct hclge_tqp *tqp;
549         int ret, i;
550
551         for (i = 0; i < kinfo->num_tqps; i++) {
552                 queue = handle->kinfo.tqp[i];
553                 tqp = container_of(queue, struct hclge_tqp, q);
554                 /* command : HCLGE_OPC_QUERY_RX_STATS */
555                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556                                            true);
557
558                 desc[0].data[0] = cpu_to_le32(tqp->index);
559                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
560                 if (ret) {
561                         dev_err(&hdev->pdev->dev,
562                                 "Query tqp stat fail, status = %d, queue = %d\n",
563                                 ret, i);
564                         return ret;
565                 }
566                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567                         le32_to_cpu(desc[0].data[1]);
568         }
569
570         for (i = 0; i < kinfo->num_tqps; i++) {
571                 queue = handle->kinfo.tqp[i];
572                 tqp = container_of(queue, struct hclge_tqp, q);
573                 /* command : HCLGE_OPC_QUERY_TX_STATS */
574                 hclge_cmd_setup_basic_desc(&desc[0],
575                                            HCLGE_OPC_QUERY_TX_STATS,
576                                            true);
577
578                 desc[0].data[0] = cpu_to_le32(tqp->index);
579                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
580                 if (ret) {
581                         dev_err(&hdev->pdev->dev,
582                                 "Query tqp stat fail, status = %d, queue = %d\n",
583                                 ret, i);
584                         return ret;
585                 }
586                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587                         le32_to_cpu(desc[0].data[1]);
588         }
589
590         return 0;
591 }
592
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596         struct hclge_tqp *tqp;
597         u64 *buff = data;
598         int i;
599
600         for (i = 0; i < kinfo->num_tqps; i++) {
601                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603         }
604
605         for (i = 0; i < kinfo->num_tqps; i++) {
606                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608         }
609
610         return buff;
611 }
612
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616
617         /* each tqp has both a TX and an RX queue */
618         return kinfo->num_tqps * 2;
619 }
620
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624         u8 *buff = data;
625         int i;
626
627         for (i = 0; i < kinfo->num_tqps; i++) {
628                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629                         struct hclge_tqp, q);
630                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
631                          tqp->index);
632                 buff = buff + ETH_GSTRING_LEN;
633         }
634
635         for (i = 0; i < kinfo->num_tqps; i++) {
636                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637                         struct hclge_tqp, q);
638                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
639                          tqp->index);
640                 buff = buff + ETH_GSTRING_LEN;
641         }
642
643         return buff;
644 }
645
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647                                  const struct hclge_comm_stats_str strs[],
648                                  int size, u64 *data)
649 {
650         u64 *buf = data;
651         u32 i;
652
653         for (i = 0; i < size; i++)
654                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655
656         return buf + size;
657 }
658
659 static u8 *hclge_comm_get_strings(u32 stringset,
660                                   const struct hclge_comm_stats_str strs[],
661                                   int size, u8 *data)
662 {
663         char *buff = (char *)data;
664         u32 i;
665
666         if (stringset != ETH_SS_STATS)
667                 return buff;
668
669         for (i = 0; i < size; i++) {
670                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671                 buff = buff + ETH_GSTRING_LEN;
672         }
673
674         return (u8 *)buff;
675 }
676
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679         struct hnae3_handle *handle;
680         int status;
681
682         handle = &hdev->vport[0].nic;
683         if (handle->client) {
684                 status = hclge_tqps_update_stats(handle);
685                 if (status) {
686                         dev_err(&hdev->pdev->dev,
687                                 "Update TQPS stats fail, status = %d.\n",
688                                 status);
689                 }
690         }
691
692         status = hclge_mac_update_stats(hdev);
693         if (status)
694                 dev_err(&hdev->pdev->dev,
695                         "Update MAC stats fail, status = %d.\n", status);
696 }
697
698 static void hclge_update_stats(struct hnae3_handle *handle,
699                                struct net_device_stats *net_stats)
700 {
701         struct hclge_vport *vport = hclge_get_vport(handle);
702         struct hclge_dev *hdev = vport->back;
703         int status;
704
705         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706                 return;
707
708         status = hclge_mac_update_stats(hdev);
709         if (status)
710                 dev_err(&hdev->pdev->dev,
711                         "Update MAC stats fail, status = %d.\n",
712                         status);
713
714         status = hclge_tqps_update_stats(handle);
715         if (status)
716                 dev_err(&hdev->pdev->dev,
717                         "Update TQPS stats fail, status = %d.\n",
718                         status);
719
720         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726                 HNAE3_SUPPORT_PHY_LOOPBACK |\
727                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729
730         struct hclge_vport *vport = hclge_get_vport(handle);
731         struct hclge_dev *hdev = vport->back;
732         int count = 0;
733
734         /* Loopback test support rules:
735          * mac: only GE mode supports it
736          * serdes: all mac modes support it, including GE/XGE/LGE/CGE
737          * phy: only supported when a phy device exists on the board
738          */
739         if (stringset == ETH_SS_TEST) {
740                 /* clear the loopback bit flags first */
741                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746                         count += 1;
747                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748                 }
749
750                 count += 2;
751                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753
754                 if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
755                     hdev->hw.mac.phydev->drv->set_loopback) {
756                         count += 1;
757                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
758                 }
759
760         } else if (stringset == ETH_SS_STATS) {
761                 count = ARRAY_SIZE(g_mac_stats_string) +
762                         hclge_tqps_get_sset_count(handle, stringset);
763         }
764
765         return count;
766 }
767
768 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
769                               u8 *data)
770 {
771         u8 *p = data;
772         int size;
773
774         if (stringset == ETH_SS_STATS) {
775                 size = ARRAY_SIZE(g_mac_stats_string);
776                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
777                                            size, p);
778                 p = hclge_tqps_get_strings(handle, p);
779         } else if (stringset == ETH_SS_TEST) {
780                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
781                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
782                                ETH_GSTRING_LEN);
783                         p += ETH_GSTRING_LEN;
784                 }
785                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
786                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
787                                ETH_GSTRING_LEN);
788                         p += ETH_GSTRING_LEN;
789                 }
790                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
791                         memcpy(p,
792                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
793                                ETH_GSTRING_LEN);
794                         p += ETH_GSTRING_LEN;
795                 }
796                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
797                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
798                                ETH_GSTRING_LEN);
799                         p += ETH_GSTRING_LEN;
800                 }
801         }
802 }
803
804 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
805 {
806         struct hclge_vport *vport = hclge_get_vport(handle);
807         struct hclge_dev *hdev = vport->back;
808         u64 *p;
809
810         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
811                                  ARRAY_SIZE(g_mac_stats_string), data);
812         p = hclge_tqps_get_stats(handle, p);
813 }
814
815 static void hclge_get_mac_stat(struct hnae3_handle *handle,
816                                struct hns3_mac_stats *mac_stats)
817 {
818         struct hclge_vport *vport = hclge_get_vport(handle);
819         struct hclge_dev *hdev = vport->back;
820
821         hclge_update_stats(handle, NULL);
822
823         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
824         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
825 }
826
827 static int hclge_parse_func_status(struct hclge_dev *hdev,
828                                    struct hclge_func_status_cmd *status)
829 {
830 #define HCLGE_MAC_ID_MASK       0xF
831
832         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
833                 return -EINVAL;
834
835         /* Record whether this pf is the main pf */
836         if (status->pf_state & HCLGE_PF_STATE_MAIN)
837                 hdev->flag |= HCLGE_FLAG_MAIN;
838         else
839                 hdev->flag &= ~HCLGE_FLAG_MAIN;
840
841         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
842         return 0;
843 }
844
845 static int hclge_query_function_status(struct hclge_dev *hdev)
846 {
847 #define HCLGE_QUERY_MAX_CNT     5
848
849         struct hclge_func_status_cmd *req;
850         struct hclge_desc desc;
851         int timeout = 0;
852         int ret;
853
854         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
855         req = (struct hclge_func_status_cmd *)desc.data;
856
857         do {
858                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
859                 if (ret) {
860                         dev_err(&hdev->pdev->dev,
861                                 "query function status failed %d.\n", ret);
862                         return ret;
863                 }
864
865                 /* Check whether pf reset is done */
866                 if (req->pf_state)
867                         break;
868                 usleep_range(1000, 2000);
869         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
870
871         return hclge_parse_func_status(hdev, req);
872 }
873
874 static int hclge_query_pf_resource(struct hclge_dev *hdev)
875 {
876         struct hclge_pf_res_cmd *req;
877         struct hclge_desc desc;
878         int ret;
879
880         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
881         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
882         if (ret) {
883                 dev_err(&hdev->pdev->dev,
884                         "query pf resource failed %d.\n", ret);
885                 return ret;
886         }
887
888         req = (struct hclge_pf_res_cmd *)desc.data;
889         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
890                          le16_to_cpu(req->ext_tqp_num);
891         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
892
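        /* Buffer sizes are reported in units of 1 << HCLGE_BUF_UNIT_S bytes;
         * fall back to the driver defaults when a field is zero and round the
         * result up to HCLGE_BUF_SIZE_UNIT.
         */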
893         if (req->tx_buf_size)
894                 hdev->tx_buf_size =
895                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
896         else
897                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
898
899         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
900
901         if (req->dv_buf_size)
902                 hdev->dv_buf_size =
903                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
904         else
905                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
906
907         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
908
909         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
910         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
911                 dev_err(&hdev->pdev->dev,
912                         "only %u msi resources available, not enough for pf(min:2).\n",
913                         hdev->num_nic_msi);
914                 return -EINVAL;
915         }
916
917         if (hnae3_dev_roce_supported(hdev)) {
918                 hdev->num_roce_msi =
919                         le16_to_cpu(req->pf_intr_vector_number_roce);
920
921                 /* PF should have both NIC and Roce vectors;
922                  * the NIC vectors are placed before the Roce vectors.
923                  */
924                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
925         } else {
926                 hdev->num_msi = hdev->num_nic_msi;
927         }
928
929         return 0;
930 }
931
932 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
933 {
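        /* speed_cmd encoding: 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G,
         * 5 = 100G, 6 = 10M, 7 = 100M, 8 = 200G.
         */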
934         switch (speed_cmd) {
935         case 6:
936                 *speed = HCLGE_MAC_SPEED_10M;
937                 break;
938         case 7:
939                 *speed = HCLGE_MAC_SPEED_100M;
940                 break;
941         case 0:
942                 *speed = HCLGE_MAC_SPEED_1G;
943                 break;
944         case 1:
945                 *speed = HCLGE_MAC_SPEED_10G;
946                 break;
947         case 2:
948                 *speed = HCLGE_MAC_SPEED_25G;
949                 break;
950         case 3:
951                 *speed = HCLGE_MAC_SPEED_40G;
952                 break;
953         case 4:
954                 *speed = HCLGE_MAC_SPEED_50G;
955                 break;
956         case 5:
957                 *speed = HCLGE_MAC_SPEED_100G;
958                 break;
959         case 8:
960                 *speed = HCLGE_MAC_SPEED_200G;
961                 break;
962         default:
963                 return -EINVAL;
964         }
965
966         return 0;
967 }
968
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971         struct hclge_vport *vport = hclge_get_vport(handle);
972         struct hclge_dev *hdev = vport->back;
973         u32 speed_ability = hdev->hw.mac.speed_ability;
974         u32 speed_bit = 0;
975
976         switch (speed) {
977         case HCLGE_MAC_SPEED_10M:
978                 speed_bit = HCLGE_SUPPORT_10M_BIT;
979                 break;
980         case HCLGE_MAC_SPEED_100M:
981                 speed_bit = HCLGE_SUPPORT_100M_BIT;
982                 break;
983         case HCLGE_MAC_SPEED_1G:
984                 speed_bit = HCLGE_SUPPORT_1G_BIT;
985                 break;
986         case HCLGE_MAC_SPEED_10G:
987                 speed_bit = HCLGE_SUPPORT_10G_BIT;
988                 break;
989         case HCLGE_MAC_SPEED_25G:
990                 speed_bit = HCLGE_SUPPORT_25G_BIT;
991                 break;
992         case HCLGE_MAC_SPEED_40G:
993                 speed_bit = HCLGE_SUPPORT_40G_BIT;
994                 break;
995         case HCLGE_MAC_SPEED_50G:
996                 speed_bit = HCLGE_SUPPORT_50G_BIT;
997                 break;
998         case HCLGE_MAC_SPEED_100G:
999                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1000                 break;
1001         case HCLGE_MAC_SPEED_200G:
1002                 speed_bit = HCLGE_SUPPORT_200G_BIT;
1003                 break;
1004         default:
1005                 return -EINVAL;
1006         }
1007
1008         if (speed_bit & speed_ability)
1009                 return 0;
1010
1011         return -EINVAL;
1012 }
1013
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1015 {
1016         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018                                  mac->supported);
1019         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021                                  mac->supported);
1022         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024                                  mac->supported);
1025         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027                                  mac->supported);
1028         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030                                  mac->supported);
1031         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1032                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1033                                  mac->supported);
1034 }
1035
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1037 {
1038         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043                                  mac->supported);
1044         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046                                  mac->supported);
1047         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049                                  mac->supported);
1050         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052                                  mac->supported);
1053         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1054                 linkmode_set_bit(
1055                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1056                         mac->supported);
1057 }
1058
1059 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1060 {
1061         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1062                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1063                                  mac->supported);
1064         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1066                                  mac->supported);
1067         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1068                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1069                                  mac->supported);
1070         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1072                                  mac->supported);
1073         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1078                                  mac->supported);
1079 }
1080
1081 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1082 {
1083         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1084                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1085                                  mac->supported);
1086         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1087                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1088                                  mac->supported);
1089         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1090                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1091                                  mac->supported);
1092         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1093                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1094                                  mac->supported);
1095         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1096                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1097                                  mac->supported);
1098         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1099                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1100                                  mac->supported);
1101         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1102                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1103                                  mac->supported);
1104 }
1105
1106 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1107 {
1108         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1109         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1110
1111         switch (mac->speed) {
1112         case HCLGE_MAC_SPEED_10G:
1113         case HCLGE_MAC_SPEED_40G:
1114                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1115                                  mac->supported);
1116                 mac->fec_ability =
1117                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1118                 break;
1119         case HCLGE_MAC_SPEED_25G:
1120         case HCLGE_MAC_SPEED_50G:
1121                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1122                                  mac->supported);
1123                 mac->fec_ability =
1124                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1125                         BIT(HNAE3_FEC_AUTO);
1126                 break;
1127         case HCLGE_MAC_SPEED_100G:
1128         case HCLGE_MAC_SPEED_200G:
1129                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1130                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1131                 break;
1132         default:
1133                 mac->fec_ability = 0;
1134                 break;
1135         }
1136 }
1137
1138 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1139                                         u16 speed_ability)
1140 {
1141         struct hclge_mac *mac = &hdev->hw.mac;
1142
1143         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1144                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1145                                  mac->supported);
1146
1147         hclge_convert_setting_sr(mac, speed_ability);
1148         hclge_convert_setting_lr(mac, speed_ability);
1149         hclge_convert_setting_cr(mac, speed_ability);
1150         if (hnae3_dev_fec_supported(hdev))
1151                 hclge_convert_setting_fec(mac);
1152
1153         if (hnae3_dev_pause_supported(hdev))
1154                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155
1156         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1157         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1158 }
1159
1160 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1161                                             u16 speed_ability)
1162 {
1163         struct hclge_mac *mac = &hdev->hw.mac;
1164
1165         hclge_convert_setting_kr(mac, speed_ability);
1166         if (hnae3_dev_fec_supported(hdev))
1167                 hclge_convert_setting_fec(mac);
1168
1169         if (hnae3_dev_pause_supported(hdev))
1170                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1171
1172         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1173         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1174 }
1175
1176 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1177                                          u16 speed_ability)
1178 {
1179         unsigned long *supported = hdev->hw.mac.supported;
1180
1181         /* default to supporting all speeds for a GE port */
1182         if (!speed_ability)
1183                 speed_ability = HCLGE_SUPPORT_GE;
1184
1185         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1186                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1187                                  supported);
1188
1189         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1190                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1191                                  supported);
1192                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1193                                  supported);
1194         }
1195
1196         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1197                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1198                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1199         }
1200
1201         if (hnae3_dev_pause_supported(hdev)) {
1202                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1203                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1204         }
1205
1206         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1207         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1208 }
1209
1210 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1211 {
1212         u8 media_type = hdev->hw.mac.media_type;
1213
1214         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1215                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1216         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1217                 hclge_parse_copper_link_mode(hdev, speed_ability);
1218         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1219                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1220 }
1221
1222 static u32 hclge_get_max_speed(u16 speed_ability)
1223 {
1224         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1225                 return HCLGE_MAC_SPEED_200G;
1226
1227         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1228                 return HCLGE_MAC_SPEED_100G;
1229
1230         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1231                 return HCLGE_MAC_SPEED_50G;
1232
1233         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1234                 return HCLGE_MAC_SPEED_40G;
1235
1236         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1237                 return HCLGE_MAC_SPEED_25G;
1238
1239         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1240                 return HCLGE_MAC_SPEED_10G;
1241
1242         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1243                 return HCLGE_MAC_SPEED_1G;
1244
1245         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1246                 return HCLGE_MAC_SPEED_100M;
1247
1248         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1249                 return HCLGE_MAC_SPEED_10M;
1250
1251         return HCLGE_MAC_SPEED_1G;
1252 }
1253
1254 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1255 {
1256 #define SPEED_ABILITY_EXT_SHIFT                 8
1257
1258         struct hclge_cfg_param_cmd *req;
1259         u64 mac_addr_tmp_high;
1260         u16 speed_ability_ext;
1261         u64 mac_addr_tmp;
1262         unsigned int i;
1263
1264         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1265
1266         /* get the configuration */
1267         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1268                                               HCLGE_CFG_VMDQ_M,
1269                                               HCLGE_CFG_VMDQ_S);
1270         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1271                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1272         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1273                                             HCLGE_CFG_TQP_DESC_N_M,
1274                                             HCLGE_CFG_TQP_DESC_N_S);
1275
1276         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277                                         HCLGE_CFG_PHY_ADDR_M,
1278                                         HCLGE_CFG_PHY_ADDR_S);
1279         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280                                           HCLGE_CFG_MEDIA_TP_M,
1281                                           HCLGE_CFG_MEDIA_TP_S);
1282         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283                                           HCLGE_CFG_RX_BUF_LEN_M,
1284                                           HCLGE_CFG_RX_BUF_LEN_S);
1285         /* get mac_address */
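        /* The 48-bit MAC address is split across the config words: param[2]
         * holds the low 32 bits and param[3] the remaining high bits, which
         * are shifted up by 32 (written as (x << 31) << 1) and OR'ed in below.
         */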
1286         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1287         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1288                                             HCLGE_CFG_MAC_ADDR_H_M,
1289                                             HCLGE_CFG_MAC_ADDR_H_S);
1290
1291         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1292
1293         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1294                                              HCLGE_CFG_DEFAULT_SPEED_M,
1295                                              HCLGE_CFG_DEFAULT_SPEED_S);
1296         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1297                                                HCLGE_CFG_RSS_SIZE_M,
1298                                                HCLGE_CFG_RSS_SIZE_S);
1299
1300         for (i = 0; i < ETH_ALEN; i++)
1301                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1302
1303         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1304         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1305
1306         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307                                              HCLGE_CFG_SPEED_ABILITY_M,
1308                                              HCLGE_CFG_SPEED_ABILITY_S);
1309         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1310                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1311                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1312         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1313
1314         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1315                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1316                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1317         if (!cfg->umv_space)
1318                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1319
1320         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1321                                                HCLGE_CFG_PF_RSS_SIZE_M,
1322                                                HCLGE_CFG_PF_RSS_SIZE_S);
1323
1324         /* HCLGE_CFG_PF_RSS_SIZE_M stores the PF max rss size as a
1325          * power of 2 exponent instead of the value itself, which
1326          * is more flexible for future changes and expansions.
1327          * A PF field of 0 is not a meaningful exponent; in that case
1328          * PF and VF share the same max rss size field:
1329          * HCLGE_CFG_RSS_SIZE_S.
1330          */
1331         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1332                                1U << cfg->pf_rss_size_max :
1333                                cfg->vf_rss_size_max;
1334 }
1335
1336 /* hclge_get_cfg: query the static configuration parameters from flash
1337  * @hdev: pointer to struct hclge_dev
1338  * @hcfg: the config structure to be filled
1339  */
1340 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1341 {
1342         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1343         struct hclge_cfg_param_cmd *req;
1344         unsigned int i;
1345         int ret;
1346
1347         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1348                 u32 offset = 0;
1349
1350                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1351                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1352                                            true);
1353                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1354                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1355                 /* Len should be in units of 4 bytes when sent to hardware */
1356                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1357                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1358                 req->offset = cpu_to_le32(offset);
1359         }
1360
1361         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1362         if (ret) {
1363                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1364                 return ret;
1365         }
1366
1367         hclge_parse_cfg(hcfg, desc);
1368
1369         return 0;
1370 }
1371
1372 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1373 {
1374 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1375
1376         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1377
1378         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1379         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1380         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1381         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1382         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1383         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1384         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1385 }
1386
1387 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1388                                   struct hclge_desc *desc)
1389 {
1390         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1391         struct hclge_dev_specs_0_cmd *req0;
1392         struct hclge_dev_specs_1_cmd *req1;
1393
1394         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1395         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1396
1397         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1398         ae_dev->dev_specs.rss_ind_tbl_size =
1399                 le16_to_cpu(req0->rss_ind_tbl_size);
1400         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1401         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1402         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1403         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1404         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1405         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1406 }
1407
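/* replace any device spec the firmware left as zero with the driver's
 * default value
 */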
1408 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1409 {
1410         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1411
1412         if (!dev_specs->max_non_tso_bd_num)
1413                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1414         if (!dev_specs->rss_ind_tbl_size)
1415                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1416         if (!dev_specs->rss_key_size)
1417                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1418         if (!dev_specs->max_tm_rate)
1419                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1420         if (!dev_specs->max_qset_num)
1421                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1422         if (!dev_specs->max_int_gl)
1423                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1424         if (!dev_specs->max_frm_size)
1425                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1426 }
1427
1428 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1429 {
1430         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1431         int ret;
1432         int i;
1433
1434         /* set default specifications as devices lower than version V3 do not
1435          * support querying specifications from firmware.
1436          */
1437         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1438                 hclge_set_default_dev_specs(hdev);
1439                 return 0;
1440         }
1441
1442         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1443                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1444                                            true);
1445                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1446         }
1447         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1448
1449         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1450         if (ret)
1451                 return ret;
1452
1453         hclge_parse_dev_specs(hdev, desc);
1454         hclge_check_dev_specs(hdev);
1455
1456         return 0;
1457 }
1458
1459 static int hclge_get_cap(struct hclge_dev *hdev)
1460 {
1461         int ret;
1462
1463         ret = hclge_query_function_status(hdev);
1464         if (ret) {
1465                 dev_err(&hdev->pdev->dev,
1466                         "query function status error %d.\n", ret);
1467                 return ret;
1468         }
1469
1470         /* get pf resource */
1471         return hclge_query_pf_resource(hdev);
1472 }
1473
1474 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1475 {
1476 #define HCLGE_MIN_TX_DESC       64
1477 #define HCLGE_MIN_RX_DESC       64
1478
1479         if (!is_kdump_kernel())
1480                 return;
1481
1482         dev_info(&hdev->pdev->dev,
1483                  "Running kdump kernel. Using minimal resources\n");
1484
1485         /* the minimal number of queue pairs equals the number of vports */
1486         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1487         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1488         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1489 }
1490
1491 static int hclge_configure(struct hclge_dev *hdev)
1492 {
1493         struct hclge_cfg cfg;
1494         unsigned int i;
1495         int ret;
1496
1497         ret = hclge_get_cfg(hdev, &cfg);
1498         if (ret)
1499                 return ret;
1500
1501         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1502         hdev->base_tqp_pid = 0;
1503         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1504         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1505         hdev->rx_buf_len = cfg.rx_buf_len;
1506         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1507         hdev->hw.mac.media_type = cfg.media_type;
1508         hdev->hw.mac.phy_addr = cfg.phy_addr;
1509         hdev->num_tx_desc = cfg.tqp_desc_num;
1510         hdev->num_rx_desc = cfg.tqp_desc_num;
1511         hdev->tm_info.num_pg = 1;
1512         hdev->tc_max = cfg.tc_num;
1513         hdev->tm_info.hw_pfc_map = 0;
1514         hdev->wanted_umv_size = cfg.umv_space;
1515
1516         if (hnae3_dev_fd_supported(hdev)) {
1517                 hdev->fd_en = true;
1518                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1519         }
1520
1521         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1522         if (ret) {
1523                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1524                         cfg.default_speed, ret);
1525                 return ret;
1526         }
1527
1528         hclge_parse_link_mode(hdev, cfg.speed_ability);
1529
1530         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1531
1532         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1533             (hdev->tc_max < 1)) {
1534                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1535                          hdev->tc_max);
1536                 hdev->tc_max = 1;
1537         }
1538
1539         /* Dev does not support DCB */
1540         if (!hnae3_dev_dcb_supported(hdev)) {
1541                 hdev->tc_max = 1;
1542                 hdev->pfc_max = 0;
1543         } else {
1544                 hdev->pfc_max = hdev->tc_max;
1545         }
1546
1547         hdev->tm_info.num_tc = 1;
1548
1549         /* Non-contiguous TCs are not currently supported */
1550         for (i = 0; i < hdev->tm_info.num_tc; i++)
1551                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1552
1553         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1554
1555         hclge_init_kdump_kernel_config(hdev);
1556
1557         /* Set the init affinity based on pci func number */
1558         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1559         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1560         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1561                         &hdev->affinity_mask);
1562
1563         return ret;
1564 }
1565
1566 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1567                             u16 tso_mss_max)
1568 {
1569         struct hclge_cfg_tso_status_cmd *req;
1570         struct hclge_desc desc;
1571
1572         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1573
1574         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1575         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1576         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1577
1578         return hclge_cmd_send(&hdev->hw, &desc, 1);
1579 }
1580
1581 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1582 {
1583         struct hclge_cfg_gro_status_cmd *req;
1584         struct hclge_desc desc;
1585         int ret;
1586
1587         if (!hnae3_dev_gro_supported(hdev))
1588                 return 0;
1589
1590         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1591         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1592
1593         req->gro_en = en ? 1 : 0;
1594
1595         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1596         if (ret)
1597                 dev_err(&hdev->pdev->dev,
1598                         "GRO hardware config cmd failed, ret = %d\n", ret);
1599
1600         return ret;
1601 }
1602
1603 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1604 {
1605         struct hclge_tqp *tqp;
1606         int i;
1607
1608         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1609                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1610         if (!hdev->htqp)
1611                 return -ENOMEM;
1612
1613         tqp = hdev->htqp;
1614
1615         for (i = 0; i < hdev->num_tqps; i++) {
1616                 tqp->dev = &hdev->pdev->dev;
1617                 tqp->index = i;
1618
1619                 tqp->q.ae_algo = &ae_algo;
1620                 tqp->q.buf_size = hdev->rx_buf_len;
1621                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1622                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1623
1624                 /* need an extended offset to configure queues >=
1625                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1626                  */
1627                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1628                         tqp->q.io_base = hdev->hw.io_base +
1629                                          HCLGE_TQP_REG_OFFSET +
1630                                          i * HCLGE_TQP_REG_SIZE;
1631                 else
1632                         tqp->q.io_base = hdev->hw.io_base +
1633                                          HCLGE_TQP_REG_OFFSET +
1634                                          HCLGE_TQP_EXT_REG_OFFSET +
1635                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1636                                          HCLGE_TQP_REG_SIZE;
1637
1638                 tqp++;
1639         }
1640
1641         return 0;
1642 }
1643
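/* bind the physical queue pair @tqp_pid to function @func_id and expose it
 * there as virtual queue @tqp_vid
 */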
1644 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1645                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1646 {
1647         struct hclge_tqp_map_cmd *req;
1648         struct hclge_desc desc;
1649         int ret;
1650
1651         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1652
1653         req = (struct hclge_tqp_map_cmd *)desc.data;
1654         req->tqp_id = cpu_to_le16(tqp_pid);
1655         req->tqp_vf = func_id;
1656         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1657         if (!is_pf)
1658                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1659         req->tqp_vid = cpu_to_le16(tqp_vid);
1660
1661         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1662         if (ret)
1663                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1664
1665         return ret;
1666 }
1667
1668 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1669 {
1670         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1671         struct hclge_dev *hdev = vport->back;
1672         int i, alloced;
1673
1674         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1675              alloced < num_tqps; i++) {
1676                 if (!hdev->htqp[i].alloced) {
1677                         hdev->htqp[i].q.handle = &vport->nic;
1678                         hdev->htqp[i].q.tqp_index = alloced;
1679                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1680                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1681                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1682                         hdev->htqp[i].alloced = true;
1683                         alloced++;
1684                 }
1685         }
1686         vport->alloc_tqps = alloced;
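        /* rss_size is bounded by the PF max rss size and by the number of
         * allocated queues per TC
         */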
1687         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1688                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1689
1690         /* ensure a one to one mapping between irq and queue by default */
1691         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1692                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1693
1694         return 0;
1695 }
1696
1697 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1698                             u16 num_tx_desc, u16 num_rx_desc)
1699
1700 {
1701         struct hnae3_handle *nic = &vport->nic;
1702         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1703         struct hclge_dev *hdev = vport->back;
1704         int ret;
1705
1706         kinfo->num_tx_desc = num_tx_desc;
1707         kinfo->num_rx_desc = num_rx_desc;
1708
1709         kinfo->rx_buf_len = hdev->rx_buf_len;
1710
1711         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1712                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1713         if (!kinfo->tqp)
1714                 return -ENOMEM;
1715
1716         ret = hclge_assign_tqp(vport, num_tqps);
1717         if (ret)
1718                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1719
1720         return ret;
1721 }
1722
1723 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1724                                   struct hclge_vport *vport)
1725 {
1726         struct hnae3_handle *nic = &vport->nic;
1727         struct hnae3_knic_private_info *kinfo;
1728         u16 i;
1729
1730         kinfo = &nic->kinfo;
1731         for (i = 0; i < vport->alloc_tqps; i++) {
1732                 struct hclge_tqp *q =
1733                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1734                 bool is_pf;
1735                 int ret;
1736
1737                 is_pf = !(vport->vport_id);
1738                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1739                                              i, is_pf);
1740                 if (ret)
1741                         return ret;
1742         }
1743
1744         return 0;
1745 }
1746
1747 static int hclge_map_tqp(struct hclge_dev *hdev)
1748 {
1749         struct hclge_vport *vport = hdev->vport;
1750         u16 i, num_vport;
1751
1752         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1753         for (i = 0; i < num_vport; i++) {
1754                 int ret;
1755
1756                 ret = hclge_map_tqp_to_vport(hdev, vport);
1757                 if (ret)
1758                         return ret;
1759
1760                 vport++;
1761         }
1762
1763         return 0;
1764 }
1765
1766 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1767 {
1768         struct hnae3_handle *nic = &vport->nic;
1769         struct hclge_dev *hdev = vport->back;
1770         int ret;
1771
1772         nic->pdev = hdev->pdev;
1773         nic->ae_algo = &ae_algo;
1774         nic->numa_node_mask = hdev->numa_node_mask;
1775
1776         ret = hclge_knic_setup(vport, num_tqps,
1777                                hdev->num_tx_desc, hdev->num_rx_desc);
1778         if (ret)
1779                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1780
1781         return ret;
1782 }
1783
1784 static int hclge_alloc_vport(struct hclge_dev *hdev)
1785 {
1786         struct pci_dev *pdev = hdev->pdev;
1787         struct hclge_vport *vport;
1788         u32 tqp_main_vport;
1789         u32 tqp_per_vport;
1790         int num_vport, i;
1791         int ret;
1792
1793         /* We need to alloc a vport for the main NIC of the PF */
1794         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1795
1796         if (hdev->num_tqps < num_vport) {
1797                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1798                         hdev->num_tqps, num_vport);
1799                 return -EINVAL;
1800         }
1801
1802         /* Alloc the same number of TQPs for every vport */
1803         tqp_per_vport = hdev->num_tqps / num_vport;
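        /* the main vport additionally takes any TQPs left over from the
         * per-vport division
         */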
1804         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1805
1806         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1807                              GFP_KERNEL);
1808         if (!vport)
1809                 return -ENOMEM;
1810
1811         hdev->vport = vport;
1812         hdev->num_alloc_vport = num_vport;
1813
1814         if (IS_ENABLED(CONFIG_PCI_IOV))
1815                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1816
1817         for (i = 0; i < num_vport; i++) {
1818                 vport->back = hdev;
1819                 vport->vport_id = i;
1820                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1821                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1822                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1823                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1824                 INIT_LIST_HEAD(&vport->vlan_list);
1825                 INIT_LIST_HEAD(&vport->uc_mac_list);
1826                 INIT_LIST_HEAD(&vport->mc_mac_list);
1827                 spin_lock_init(&vport->mac_list_lock);
1828
1829                 if (i == 0)
1830                         ret = hclge_vport_setup(vport, tqp_main_vport);
1831                 else
1832                         ret = hclge_vport_setup(vport, tqp_per_vport);
1833                 if (ret) {
1834                         dev_err(&pdev->dev,
1835                                 "vport setup failed for vport %d, %d\n",
1836                                 i, ret);
1837                         return ret;
1838                 }
1839
1840                 vport++;
1841         }
1842
1843         return 0;
1844 }
1845
1846 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1847                                     struct hclge_pkt_buf_alloc *buf_alloc)
1848 {
1849 /* TX buffer size is in units of 128 bytes */
1850 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1851 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1852         struct hclge_tx_buff_alloc_cmd *req;
1853         struct hclge_desc desc;
1854         int ret;
1855         u8 i;
1856
1857         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1858
1859         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1860         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1861                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1862
1863                 req->tx_pkt_buff[i] =
1864                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1865                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1866         }
1867
1868         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1869         if (ret)
1870                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1871                         ret);
1872
1873         return ret;
1874 }
1875
1876 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1877                                  struct hclge_pkt_buf_alloc *buf_alloc)
1878 {
1879         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1880
1881         if (ret)
1882                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1883
1884         return ret;
1885 }
1886
1887 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1888 {
1889         unsigned int i;
1890         u32 cnt = 0;
1891
1892         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1893                 if (hdev->hw_tc_map & BIT(i))
1894                         cnt++;
1895         return cnt;
1896 }
1897
1898 /* Get the number of pfc enabled TCs that have a private buffer */
1899 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1900                                   struct hclge_pkt_buf_alloc *buf_alloc)
1901 {
1902         struct hclge_priv_buf *priv;
1903         unsigned int i;
1904         int cnt = 0;
1905
1906         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1907                 priv = &buf_alloc->priv_buf[i];
1908                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1909                     priv->enable)
1910                         cnt++;
1911         }
1912
1913         return cnt;
1914 }
1915
1916 /* Get the number of pfc disabled TCs that have a private buffer */
1917 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1918                                      struct hclge_pkt_buf_alloc *buf_alloc)
1919 {
1920         struct hclge_priv_buf *priv;
1921         unsigned int i;
1922         int cnt = 0;
1923
1924         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1925                 priv = &buf_alloc->priv_buf[i];
1926                 if (hdev->hw_tc_map & BIT(i) &&
1927                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1928                     priv->enable)
1929                         cnt++;
1930         }
1931
1932         return cnt;
1933 }
1934
1935 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1936 {
1937         struct hclge_priv_buf *priv;
1938         u32 rx_priv = 0;
1939         int i;
1940
1941         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1942                 priv = &buf_alloc->priv_buf[i];
1943                 if (priv->enable)
1944                         rx_priv += priv->buf_size;
1945         }
1946         return rx_priv;
1947 }
1948
1949 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1950 {
1951         u32 i, total_tx_size = 0;
1952
1953         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1954                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1955
1956         return total_tx_size;
1957 }
1958
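/* check whether the rx buffer remaining after the private allocation can
 * hold the required shared buffer; if so, record the shared buffer size
 * and set up its waterlines and per-TC thresholds
 */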
1959 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1960                                 struct hclge_pkt_buf_alloc *buf_alloc,
1961                                 u32 rx_all)
1962 {
1963         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1964         u32 tc_num = hclge_get_tc_num(hdev);
1965         u32 shared_buf, aligned_mps;
1966         u32 rx_priv;
1967         int i;
1968
1969         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1970
1971         if (hnae3_dev_dcb_supported(hdev))
1972                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1973                                         hdev->dv_buf_size;
1974         else
1975                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1976                                         + hdev->dv_buf_size;
1977
1978         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1979         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1980                              HCLGE_BUF_SIZE_UNIT);
1981
1982         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1983         if (rx_all < rx_priv + shared_std)
1984                 return false;
1985
1986         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1987         buf_alloc->s_buf.buf_size = shared_buf;
1988         if (hnae3_dev_dcb_supported(hdev)) {
1989                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1990                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1991                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1992                                   HCLGE_BUF_SIZE_UNIT);
1993         } else {
1994                 buf_alloc->s_buf.self.high = aligned_mps +
1995                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1996                 buf_alloc->s_buf.self.low = aligned_mps;
1997         }
1998
1999         if (hnae3_dev_dcb_supported(hdev)) {
2000                 hi_thrd = shared_buf - hdev->dv_buf_size;
2001
2002                 if (tc_num <= NEED_RESERVE_TC_NUM)
2003                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2004                                         / BUF_MAX_PERCENT;
2005
2006                 if (tc_num)
2007                         hi_thrd = hi_thrd / tc_num;
2008
2009                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2010                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2011                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2012         } else {
2013                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2014                 lo_thrd = aligned_mps;
2015         }
2016
2017         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2018                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2019                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2020         }
2021
2022         return true;
2023 }
2024
2025 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2026                                 struct hclge_pkt_buf_alloc *buf_alloc)
2027 {
2028         u32 i, total_size;
2029
2030         total_size = hdev->pkt_buf_size;
2031
2032         /* alloc tx buffer for all enabled tc */
2033         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2034                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2035
2036                 if (hdev->hw_tc_map & BIT(i)) {
2037                         if (total_size < hdev->tx_buf_size)
2038                                 return -ENOMEM;
2039
2040                         priv->tx_buf_size = hdev->tx_buf_size;
2041                 } else {
2042                         priv->tx_buf_size = 0;
2043                 }
2044
2045                 total_size -= priv->tx_buf_size;
2046         }
2047
2048         return 0;
2049 }
2050
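/* assign a private rx buffer and waterlines to every enabled TC, using
 * larger watermarks when @max is true, then check whether the result still
 * fits into the total rx buffer
 */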
2051 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2052                                   struct hclge_pkt_buf_alloc *buf_alloc)
2053 {
2054         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2055         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2056         unsigned int i;
2057
2058         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2059                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2060
2061                 priv->enable = 0;
2062                 priv->wl.low = 0;
2063                 priv->wl.high = 0;
2064                 priv->buf_size = 0;
2065
2066                 if (!(hdev->hw_tc_map & BIT(i)))
2067                         continue;
2068
2069                 priv->enable = 1;
2070
2071                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2072                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2073                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2074                                                 HCLGE_BUF_SIZE_UNIT);
2075                 } else {
2076                         priv->wl.low = 0;
2077                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2078                                         aligned_mps;
2079                 }
2080
2081                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2082         }
2083
2084         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2085 }
2086
2087 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2088                                           struct hclge_pkt_buf_alloc *buf_alloc)
2089 {
2090         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2091         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2092         int i;
2093
2094         /* clear from the last TC first */
2095         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2096                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2097                 unsigned int mask = BIT((unsigned int)i);
2098
2099                 if (hdev->hw_tc_map & mask &&
2100                     !(hdev->tm_info.hw_pfc_map & mask)) {
2101                         /* Clear the private buffer of a TC without PFC */
2102                         priv->wl.low = 0;
2103                         priv->wl.high = 0;
2104                         priv->buf_size = 0;
2105                         priv->enable = 0;
2106                         no_pfc_priv_num--;
2107                 }
2108
2109                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2110                     no_pfc_priv_num == 0)
2111                         break;
2112         }
2113
2114         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2115 }
2116
2117 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2118                                         struct hclge_pkt_buf_alloc *buf_alloc)
2119 {
2120         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2122         int i;
2123
2124         /* clear from the last TC first */
2125         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2126                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2127                 unsigned int mask = BIT((unsigned int)i);
2128
2129                 if (hdev->hw_tc_map & mask &&
2130                     hdev->tm_info.hw_pfc_map & mask) {
2131                         /* Reduce the number of pfc TCs with a private buffer */
2132                         priv->wl.low = 0;
2133                         priv->enable = 0;
2134                         priv->wl.high = 0;
2135                         priv->buf_size = 0;
2136                         pfc_priv_num--;
2137                 }
2138
2139                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2140                     pfc_priv_num == 0)
2141                         break;
2142         }
2143
2144         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2145 }
2146
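/* try to hand the whole rx buffer out as per-TC private buffers with no
 * shared buffer; fails if the resulting per-TC share would be too small
 */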
2147 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2148                                       struct hclge_pkt_buf_alloc *buf_alloc)
2149 {
2150 #define COMPENSATE_BUFFER       0x3C00
2151 #define COMPENSATE_HALF_MPS_NUM 5
2152 #define PRIV_WL_GAP             0x1800
2153
2154         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2155         u32 tc_num = hclge_get_tc_num(hdev);
2156         u32 half_mps = hdev->mps >> 1;
2157         u32 min_rx_priv;
2158         unsigned int i;
2159
2160         if (tc_num)
2161                 rx_priv = rx_priv / tc_num;
2162
2163         if (tc_num <= NEED_RESERVE_TC_NUM)
2164                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2165
2166         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2167                         COMPENSATE_HALF_MPS_NUM * half_mps;
2168         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2169         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2170
2171         if (rx_priv < min_rx_priv)
2172                 return false;
2173
2174         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2175                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2176
2177                 priv->enable = 0;
2178                 priv->wl.low = 0;
2179                 priv->wl.high = 0;
2180                 priv->buf_size = 0;
2181
2182                 if (!(hdev->hw_tc_map & BIT(i)))
2183                         continue;
2184
2185                 priv->enable = 1;
2186                 priv->buf_size = rx_priv;
2187                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2188                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2189         }
2190
2191         buf_alloc->s_buf.buf_size = 0;
2192
2193         return true;
2194 }
2195
2196 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2197  * @hdev: pointer to struct hclge_dev
2198  * @buf_alloc: pointer to buffer calculation data
2199  * @return: 0: calculation successful, negative: fail
2200  */
2201 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2202                                 struct hclge_pkt_buf_alloc *buf_alloc)
2203 {
2204         /* When DCB is not supported, rx private buffer is not allocated. */
2205         if (!hnae3_dev_dcb_supported(hdev)) {
2206                 u32 rx_all = hdev->pkt_buf_size;
2207
2208                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2209                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2210                         return -ENOMEM;
2211
2212                 return 0;
2213         }
2214
2215         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2216                 return 0;
2217
2218         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2219                 return 0;
2220
2221         /* try to decrease the buffer size */
2222         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2223                 return 0;
2224
2225         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2226                 return 0;
2227
2228         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2229                 return 0;
2230
2231         return -ENOMEM;
2232 }
2233
2234 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2235                                    struct hclge_pkt_buf_alloc *buf_alloc)
2236 {
2237         struct hclge_rx_priv_buff_cmd *req;
2238         struct hclge_desc desc;
2239         int ret;
2240         int i;
2241
2242         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2243         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2244
2245         /* Alloc private buffer TCs */
2246         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2247                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2248
2249                 req->buf_num[i] =
2250                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2251                 req->buf_num[i] |=
2252                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2253         }
2254
2255         req->shared_buf =
2256                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2257                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2258
2259         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2260         if (ret)
2261                 dev_err(&hdev->pdev->dev,
2262                         "rx private buffer alloc cmd failed %d\n", ret);
2263
2264         return ret;
2265 }
2266
2267 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2268                                    struct hclge_pkt_buf_alloc *buf_alloc)
2269 {
2270         struct hclge_rx_priv_wl_buf *req;
2271         struct hclge_priv_buf *priv;
2272         struct hclge_desc desc[2];
2273         int i, j;
2274         int ret;
2275
2276         for (i = 0; i < 2; i++) {
2277                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2278                                            false);
2279                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2280
2281                 /* The first descriptor sets the NEXT bit to 1 */
2282                 if (i == 0)
2283                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2284                 else
2285                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2286
2287                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2288                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2289
2290                         priv = &buf_alloc->priv_buf[idx];
2291                         req->tc_wl[j].high =
2292                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2293                         req->tc_wl[j].high |=
2294                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2295                         req->tc_wl[j].low =
2296                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2297                         req->tc_wl[j].low |=
2298                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2299                 }
2300         }
2301
2302         /* Send 2 descriptors at one time */
2303         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2304         if (ret)
2305                 dev_err(&hdev->pdev->dev,
2306                         "rx private waterline config cmd failed %d\n",
2307                         ret);
2308         return ret;
2309 }
2310
2311 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2312                                     struct hclge_pkt_buf_alloc *buf_alloc)
2313 {
2314         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2315         struct hclge_rx_com_thrd *req;
2316         struct hclge_desc desc[2];
2317         struct hclge_tc_thrd *tc;
2318         int i, j;
2319         int ret;
2320
2321         for (i = 0; i < 2; i++) {
2322                 hclge_cmd_setup_basic_desc(&desc[i],
2323                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2324                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2325
2326                 /* The first descriptor sets the NEXT bit to 1 */
2327                 if (i == 0)
2328                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329                 else
2330                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331
2332                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2334
2335                         req->com_thrd[j].high =
2336                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2337                         req->com_thrd[j].high |=
2338                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2339                         req->com_thrd[j].low =
2340                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2341                         req->com_thrd[j].low |=
2342                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2343                 }
2344         }
2345
2346         /* Send 2 descriptors at one time */
2347         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2348         if (ret)
2349                 dev_err(&hdev->pdev->dev,
2350                         "common threshold config cmd failed %d\n", ret);
2351         return ret;
2352 }
2353
2354 static int hclge_common_wl_config(struct hclge_dev *hdev,
2355                                   struct hclge_pkt_buf_alloc *buf_alloc)
2356 {
2357         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2358         struct hclge_rx_com_wl *req;
2359         struct hclge_desc desc;
2360         int ret;
2361
2362         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2363
2364         req = (struct hclge_rx_com_wl *)desc.data;
2365         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2366         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2367
2368         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2369         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2370
2371         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2372         if (ret)
2373                 dev_err(&hdev->pdev->dev,
2374                         "common waterline config cmd failed %d\n", ret);
2375
2376         return ret;
2377 }
2378
2379 int hclge_buffer_alloc(struct hclge_dev *hdev)
2380 {
2381         struct hclge_pkt_buf_alloc *pkt_buf;
2382         int ret;
2383
2384         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2385         if (!pkt_buf)
2386                 return -ENOMEM;
2387
2388         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2389         if (ret) {
2390                 dev_err(&hdev->pdev->dev,
2391                         "could not calc tx buffer size for all TCs %d\n", ret);
2392                 goto out;
2393         }
2394
2395         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2396         if (ret) {
2397                 dev_err(&hdev->pdev->dev,
2398                         "could not alloc tx buffers %d\n", ret);
2399                 goto out;
2400         }
2401
2402         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2403         if (ret) {
2404                 dev_err(&hdev->pdev->dev,
2405                         "could not calc rx priv buffer size for all TCs %d\n",
2406                         ret);
2407                 goto out;
2408         }
2409
2410         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2411         if (ret) {
2412                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2413                         ret);
2414                 goto out;
2415         }
2416
2417         if (hnae3_dev_dcb_supported(hdev)) {
2418                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2419                 if (ret) {
2420                         dev_err(&hdev->pdev->dev,
2421                                 "could not configure rx private waterline %d\n",
2422                                 ret);
2423                         goto out;
2424                 }
2425
2426                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2427                 if (ret) {
2428                         dev_err(&hdev->pdev->dev,
2429                                 "could not configure common threshold %d\n",
2430                                 ret);
2431                         goto out;
2432                 }
2433         }
2434
2435         ret = hclge_common_wl_config(hdev, pkt_buf);
2436         if (ret)
2437                 dev_err(&hdev->pdev->dev,
2438                         "could not configure common waterline %d\n", ret);
2439
2440 out:
2441         kfree(pkt_buf);
2442         return ret;
2443 }
2444
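/* fill the RoCE handle's vector and I/O resource info from the PF's MSI
 * layout and the NIC handle
 */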
2445 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2446 {
2447         struct hnae3_handle *roce = &vport->roce;
2448         struct hnae3_handle *nic = &vport->nic;
2449         struct hclge_dev *hdev = vport->back;
2450
2451         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2452
2453         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2454                 return -EINVAL;
2455
2456         roce->rinfo.base_vector = hdev->roce_base_vector;
2457
2458         roce->rinfo.netdev = nic->kinfo.netdev;
2459         roce->rinfo.roce_io_base = hdev->hw.io_base;
2460         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2461
2462         roce->pdev = nic->pdev;
2463         roce->ae_algo = nic->ae_algo;
2464         roce->numa_node_mask = nic->numa_node_mask;
2465
2466         return 0;
2467 }
2468
2469 static int hclge_init_msi(struct hclge_dev *hdev)
2470 {
2471         struct pci_dev *pdev = hdev->pdev;
2472         int vectors;
2473         int i;
2474
2475         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2476                                         hdev->num_msi,
2477                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2478         if (vectors < 0) {
2479                 dev_err(&pdev->dev,
2480                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2481                         vectors);
2482                 return vectors;
2483         }
2484         if (vectors < hdev->num_msi)
2485                 dev_warn(&hdev->pdev->dev,
2486                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2487                          hdev->num_msi, vectors);
2488
2489         hdev->num_msi = vectors;
2490         hdev->num_msi_left = vectors;
2491
2492         hdev->base_msi_vector = pdev->irq;
2493         hdev->roce_base_vector = hdev->base_msi_vector +
2494                                 hdev->num_nic_msi;
2495
2496         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2497                                            sizeof(u16), GFP_KERNEL);
2498         if (!hdev->vector_status) {
2499                 pci_free_irq_vectors(pdev);
2500                 return -ENOMEM;
2501         }
2502
2503         for (i = 0; i < hdev->num_msi; i++)
2504                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2505
2506         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2507                                         sizeof(int), GFP_KERNEL);
2508         if (!hdev->vector_irq) {
2509                 pci_free_irq_vectors(pdev);
2510                 return -ENOMEM;
2511         }
2512
2513         return 0;
2514 }
2515
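/* only 10M and 100M links may run at half duplex; force full duplex for
 * all other speeds
 */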
2516 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2517 {
2518         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2519                 duplex = HCLGE_MAC_FULL;
2520
2521         return duplex;
2522 }
2523
2524 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2525                                       u8 duplex)
2526 {
2527         struct hclge_config_mac_speed_dup_cmd *req;
2528         struct hclge_desc desc;
2529         int ret;
2530
2531         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2532
2533         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2534
2535         if (duplex)
2536                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2537
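        /* translate the MAC speed into the encoding used by the
         * speed/duplex config command
         */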
2538         switch (speed) {
2539         case HCLGE_MAC_SPEED_10M:
2540                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2541                                 HCLGE_CFG_SPEED_S, 6);
2542                 break;
2543         case HCLGE_MAC_SPEED_100M:
2544                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2545                                 HCLGE_CFG_SPEED_S, 7);
2546                 break;
2547         case HCLGE_MAC_SPEED_1G:
2548                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2549                                 HCLGE_CFG_SPEED_S, 0);
2550                 break;
2551         case HCLGE_MAC_SPEED_10G:
2552                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2553                                 HCLGE_CFG_SPEED_S, 1);
2554                 break;
2555         case HCLGE_MAC_SPEED_25G:
2556                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2557                                 HCLGE_CFG_SPEED_S, 2);
2558                 break;
2559         case HCLGE_MAC_SPEED_40G:
2560                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2561                                 HCLGE_CFG_SPEED_S, 3);
2562                 break;
2563         case HCLGE_MAC_SPEED_50G:
2564                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2565                                 HCLGE_CFG_SPEED_S, 4);
2566                 break;
2567         case HCLGE_MAC_SPEED_100G:
2568                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2569                                 HCLGE_CFG_SPEED_S, 5);
2570                 break;
2571         case HCLGE_MAC_SPEED_200G:
2572                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2573                                 HCLGE_CFG_SPEED_S, 8);
2574                 break;
2575         default:
2576                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2577                 return -EINVAL;
2578         }
2579
2580         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2581                       1);
2582
2583         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2584         if (ret) {
2585                 dev_err(&hdev->pdev->dev,
2586                         "mac speed/duplex config cmd failed %d.\n", ret);
2587                 return ret;
2588         }
2589
2590         return 0;
2591 }
2592
2593 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2594 {
2595         struct hclge_mac *mac = &hdev->hw.mac;
2596         int ret;
2597
2598         duplex = hclge_check_speed_dup(duplex, speed);
2599         if (!mac->support_autoneg && mac->speed == speed &&
2600             mac->duplex == duplex)
2601                 return 0;
2602
2603         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2604         if (ret)
2605                 return ret;
2606
2607         hdev->hw.mac.speed = speed;
2608         hdev->hw.mac.duplex = duplex;
2609
2610         return 0;
2611 }
2612
2613 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2614                                      u8 duplex)
2615 {
2616         struct hclge_vport *vport = hclge_get_vport(handle);
2617         struct hclge_dev *hdev = vport->back;
2618
2619         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2620 }
2621
2622 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2623 {
2624         struct hclge_config_auto_neg_cmd *req;
2625         struct hclge_desc desc;
2626         u32 flag = 0;
2627         int ret;
2628
2629         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2630
2631         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2632         if (enable)
2633                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2634         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2635
2636         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2637         if (ret)
2638                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2639                         ret);
2640
2641         return ret;
2642 }
2643
2644 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2645 {
2646         struct hclge_vport *vport = hclge_get_vport(handle);
2647         struct hclge_dev *hdev = vport->back;
2648
2649         if (!hdev->hw.mac.support_autoneg) {
2650                 if (enable) {
2651                         dev_err(&hdev->pdev->dev,
2652                                 "autoneg is not supported by current port\n");
2653                         return -EOPNOTSUPP;
2654                 } else {
2655                         return 0;
2656                 }
2657         }
2658
2659         return hclge_set_autoneg_en(hdev, enable);
2660 }
2661
2662 static int hclge_get_autoneg(struct hnae3_handle *handle)
2663 {
2664         struct hclge_vport *vport = hclge_get_vport(handle);
2665         struct hclge_dev *hdev = vport->back;
2666         struct phy_device *phydev = hdev->hw.mac.phydev;
2667
2668         if (phydev)
2669                 return phydev->autoneg;
2670
2671         return hdev->hw.mac.autoneg;
2672 }
2673
2674 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2675 {
2676         struct hclge_vport *vport = hclge_get_vport(handle);
2677         struct hclge_dev *hdev = vport->back;
2678         int ret;
2679
2680         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2681
2682         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2683         if (ret)
2684                 return ret;
2685         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2686 }
2687
2688 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2689 {
2690         struct hclge_vport *vport = hclge_get_vport(handle);
2691         struct hclge_dev *hdev = vport->back;
2692
2693         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2694                 return hclge_set_autoneg_en(hdev, !halt);
2695
2696         return 0;
2697 }
2698
2699 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2700 {
2701         struct hclge_config_fec_cmd *req;
2702         struct hclge_desc desc;
2703         int ret;
2704
2705         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2706
2707         req = (struct hclge_config_fec_cmd *)desc.data;
2708         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2709                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2710         if (fec_mode & BIT(HNAE3_FEC_RS))
2711                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2712                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2713         if (fec_mode & BIT(HNAE3_FEC_BASER))
2714                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2715                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2716
2717         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2718         if (ret)
2719                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2720
2721         return ret;
2722 }
2723
2724 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2725 {
2726         struct hclge_vport *vport = hclge_get_vport(handle);
2727         struct hclge_dev *hdev = vport->back;
2728         struct hclge_mac *mac = &hdev->hw.mac;
2729         int ret;
2730
2731         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2732                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2733                 return -EINVAL;
2734         }
2735
2736         ret = hclge_set_fec_hw(hdev, fec_mode);
2737         if (ret)
2738                 return ret;
2739
2740         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2741         return 0;
2742 }
2743
2744 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2745                           u8 *fec_mode)
2746 {
2747         struct hclge_vport *vport = hclge_get_vport(handle);
2748         struct hclge_dev *hdev = vport->back;
2749         struct hclge_mac *mac = &hdev->hw.mac;
2750
2751         if (fec_ability)
2752                 *fec_ability = mac->fec_ability;
2753         if (fec_mode)
2754                 *fec_mode = mac->fec_mode;
2755 }
2756
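/* hclge_mac_init - program the MAC hardware from the cached settings:
 * speed/duplex, autoneg (when supported), the user-defined FEC mode, MTU,
 * the default loopback setting and the packet buffer allocation. Any
 * failure is returned so that initialization can be aborted.
 */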
2757 static int hclge_mac_init(struct hclge_dev *hdev)
2758 {
2759         struct hclge_mac *mac = &hdev->hw.mac;
2760         int ret;
2761
2762         hdev->support_sfp_query = true;
2763         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2764         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2765                                          hdev->hw.mac.duplex);
2766         if (ret)
2767                 return ret;
2768
2769         if (hdev->hw.mac.support_autoneg) {
2770                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2771                 if (ret)
2772                         return ret;
2773         }
2774
2775         mac->link = 0;
2776
2777         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2778                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2779                 if (ret)
2780                         return ret;
2781         }
2782
2783         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2784         if (ret) {
2785                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2786                 return ret;
2787         }
2788
2789         ret = hclge_set_default_loopback(hdev);
2790         if (ret)
2791                 return ret;
2792
2793         ret = hclge_buffer_alloc(hdev);
2794         if (ret)
2795                 dev_err(&hdev->pdev->dev,
2796                         "allocate buffer fail, ret=%d\n", ret);
2797
2798         return ret;
2799 }
2800
2801 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2802 {
2803         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2804             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2805                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2806                                     hclge_wq, &hdev->service_task, 0);
2807 }
2808
2809 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2810 {
2811         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2812             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2813                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2814                                     hclge_wq, &hdev->service_task, 0);
2815 }
2816
2817 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2818 {
2819         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2820             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2821                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2822                                     hclge_wq, &hdev->service_task,
2823                                     delay_time);
2824 }
2825
2826 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2827 {
2828         struct hclge_link_status_cmd *req;
2829         struct hclge_desc desc;
2830         int ret;
2831
2832         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2833         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2834         if (ret) {
2835                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2836                         ret);
2837                 return ret;
2838         }
2839
2840         req = (struct hclge_link_status_cmd *)desc.data;
2841         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2842                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2843
2844         return 0;
2845 }
2846
2847 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2848 {
2849         struct phy_device *phydev = hdev->hw.mac.phydev;
2850
2851         *link_status = HCLGE_LINK_STATUS_DOWN;
2852
2853         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2854                 return 0;
2855
2856         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2857                 return 0;
2858
2859         return hclge_get_mac_link_status(hdev, link_status);
2860 }
2861
2862 static void hclge_update_link_status(struct hclge_dev *hdev)
2863 {
2864         struct hnae3_client *rclient = hdev->roce_client;
2865         struct hnae3_client *client = hdev->nic_client;
2866         struct hnae3_handle *rhandle;
2867         struct hnae3_handle *handle;
2868         int state;
2869         int ret;
2870         int i;
2871
2872         if (!client)
2873                 return;
2874
2875         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2876                 return;
2877
2878         ret = hclge_get_mac_phy_link(hdev, &state);
2879         if (ret) {
2880                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2881                 return;
2882         }
2883
2884         if (state != hdev->hw.mac.link) {
2885                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2886                         handle = &hdev->vport[i].nic;
2887                         client->ops->link_status_change(handle, state);
2888                         hclge_config_mac_tnl_int(hdev, state);
2889                         rhandle = &hdev->vport[i].roce;
2890                         if (rclient && rclient->ops->link_status_change)
2891                                 rclient->ops->link_status_change(rhandle,
2892                                                                  state);
2893                 }
2894                 hdev->hw.mac.link = state;
2895         }
2896
2897         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2898 }
2899
2900 static void hclge_update_port_capability(struct hclge_dev *hdev,
2901                                          struct hclge_mac *mac)
2902 {
2903         if (hnae3_dev_fec_supported(hdev))
2904                 /* update fec ability by speed */
2905                 hclge_convert_setting_fec(mac);
2906
2907         /* firmware can not identify the backplane type, so the media type
2908          * read from the configuration can help to deal with it
2909          */
2910         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2911             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2912                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2913         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2914                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2915
2916         if (mac->support_autoneg) {
2917                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2918                 linkmode_copy(mac->advertising, mac->supported);
2919         } else {
2920                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2921                                    mac->supported);
2922                 linkmode_zero(mac->advertising);
2923         }
2924 }
2925
2926 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2927 {
2928         struct hclge_sfp_info_cmd *resp;
2929         struct hclge_desc desc;
2930         int ret;
2931
2932         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2933         resp = (struct hclge_sfp_info_cmd *)desc.data;
2934         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2935         if (ret == -EOPNOTSUPP) {
2936                 dev_warn(&hdev->pdev->dev,
2937                          "IMP does not support get SFP speed %d\n", ret);
2938                 return ret;
2939         } else if (ret) {
2940                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2941                 return ret;
2942         }
2943
2944         *speed = le32_to_cpu(resp->speed);
2945
2946         return 0;
2947 }
2948
2949 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2950 {
2951         struct hclge_sfp_info_cmd *resp;
2952         struct hclge_desc desc;
2953         int ret;
2954
2955         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2956         resp = (struct hclge_sfp_info_cmd *)desc.data;
2957
2958         resp->query_type = QUERY_ACTIVE_SPEED;
2959
2960         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2961         if (ret == -EOPNOTSUPP) {
2962                 dev_warn(&hdev->pdev->dev,
2963                          "IMP does not support get SFP info %d\n", ret);
2964                 return ret;
2965         } else if (ret) {
2966                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2967                 return ret;
2968         }
2969
2970         /* In some cases, the mac speed got from IMP may be 0, it shouldn't
2971          * be set to mac->speed.
2972          */
2973         if (!le32_to_cpu(resp->speed))
2974                 return 0;
2975
2976         mac->speed = le32_to_cpu(resp->speed);
2977         /* if resp->speed_ability is 0, it means the firmware is an old
2978          * version, so do not update these params
2979          */
2980         if (resp->speed_ability) {
2981                 mac->module_type = le32_to_cpu(resp->module_type);
2982                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2983                 mac->autoneg = resp->autoneg;
2984                 mac->support_autoneg = resp->autoneg_ability;
2985                 mac->speed_type = QUERY_ACTIVE_SPEED;
2986                 if (!resp->active_fec)
2987                         mac->fec_mode = 0;
2988                 else
2989                         mac->fec_mode = BIT(resp->active_fec);
2990         } else {
2991                 mac->speed_type = QUERY_SFP_SPEED;
2992         }
2993
2994         return 0;
2995 }
2996
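/* hclge_get_phy_link_ksettings - query the IMP-controlled PHY link settings.
 * The firmware reports them over two command descriptors: descriptor 0
 * carries autoneg/speed/duplex, port, MDI-X and the legacy u32 link-mode
 * masks (converted to ethtool bitmaps here), descriptor 1 carries the
 * master/slave configuration and state.
 */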
2997 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
2998                                         struct ethtool_link_ksettings *cmd)
2999 {
3000         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3001         struct hclge_vport *vport = hclge_get_vport(handle);
3002         struct hclge_phy_link_ksetting_0_cmd *req0;
3003         struct hclge_phy_link_ksetting_1_cmd *req1;
3004         u32 supported, advertising, lp_advertising;
3005         struct hclge_dev *hdev = vport->back;
3006         int ret;
3007
3008         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3009                                    true);
3010         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3011         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3012                                    true);
3013
3014         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3015         if (ret) {
3016                 dev_err(&hdev->pdev->dev,
3017                         "failed to get phy link ksetting, ret = %d.\n", ret);
3018                 return ret;
3019         }
3020
3021         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3022         cmd->base.autoneg = req0->autoneg;
3023         cmd->base.speed = le32_to_cpu(req0->speed);
3024         cmd->base.duplex = req0->duplex;
3025         cmd->base.port = req0->port;
3026         cmd->base.transceiver = req0->transceiver;
3027         cmd->base.phy_address = req0->phy_address;
3028         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3029         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3030         supported = le32_to_cpu(req0->supported);
3031         advertising = le32_to_cpu(req0->advertising);
3032         lp_advertising = le32_to_cpu(req0->lp_advertising);
3033         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3034                                                 supported);
3035         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3036                                                 advertising);
3037         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3038                                                 lp_advertising);
3039
3040         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3041         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3042         cmd->base.master_slave_state = req1->master_slave_state;
3043
3044         return 0;
3045 }
3046
3047 static int
3048 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3049                              const struct ethtool_link_ksettings *cmd)
3050 {
3051         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3052         struct hclge_vport *vport = hclge_get_vport(handle);
3053         struct hclge_phy_link_ksetting_0_cmd *req0;
3054         struct hclge_phy_link_ksetting_1_cmd *req1;
3055         struct hclge_dev *hdev = vport->back;
3056         u32 advertising;
3057         int ret;
3058
3059         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3060             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3061              (cmd->base.duplex != DUPLEX_HALF &&
3062               cmd->base.duplex != DUPLEX_FULL)))
3063                 return -EINVAL;
3064
3065         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3066                                    false);
3067         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3068         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3069                                    false);
3070
3071         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3072         req0->autoneg = cmd->base.autoneg;
3073         req0->speed = cpu_to_le32(cmd->base.speed);
3074         req0->duplex = cmd->base.duplex;
3075         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3076                                                 cmd->link_modes.advertising);
3077         req0->advertising = cpu_to_le32(advertising);
3078         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3079
3080         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3081         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3082
3083         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3084         if (ret) {
3085                 dev_err(&hdev->pdev->dev,
3086                         "failed to set phy link ksettings, ret = %d.\n", ret);
3087                 return ret;
3088         }
3089
3090         hdev->hw.mac.autoneg = cmd->base.autoneg;
3091         hdev->hw.mac.speed = cmd->base.speed;
3092         hdev->hw.mac.duplex = cmd->base.duplex;
3093         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3094
3095         return 0;
3096 }
3097
3098 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3099 {
3100         struct ethtool_link_ksettings cmd;
3101         int ret;
3102
3103         if (!hnae3_dev_phy_imp_supported(hdev))
3104                 return 0;
3105
3106         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3107         if (ret)
3108                 return ret;
3109
3110         hdev->hw.mac.autoneg = cmd.base.autoneg;
3111         hdev->hw.mac.speed = cmd.base.speed;
3112         hdev->hw.mac.duplex = cmd.base.duplex;
3113
3114         return 0;
3115 }
3116
3117 static int hclge_tp_port_init(struct hclge_dev *hdev)
3118 {
3119         struct ethtool_link_ksettings cmd;
3120
3121         if (!hnae3_dev_phy_imp_supported(hdev))
3122                 return 0;
3123
3124         cmd.base.autoneg = hdev->hw.mac.autoneg;
3125         cmd.base.speed = hdev->hw.mac.speed;
3126         cmd.base.duplex = hdev->hw.mac.duplex;
3127         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3128
3129         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3130 }
3131
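/* hclge_update_port_info - refresh the cached port settings. Copper ports
 * are refreshed from the PHY link ksettings when the PHY is IMP-controlled;
 * other ports query the SFP info (device version >= V2) or only the SFP
 * speed (older devices), and otherwise configure full duplex at the
 * reported speed.
 */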
3132 static int hclge_update_port_info(struct hclge_dev *hdev)
3133 {
3134         struct hclge_mac *mac = &hdev->hw.mac;
3135         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3136         int ret;
3137
3138         /* get the port info from SFP cmd if not copper port */
3139         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3140                 return hclge_update_tp_port_info(hdev);
3141
3142         /* if IMP does not support getting SFP/qSFP info, return directly */
3143         if (!hdev->support_sfp_query)
3144                 return 0;
3145
3146         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3147                 ret = hclge_get_sfp_info(hdev, mac);
3148         else
3149                 ret = hclge_get_sfp_speed(hdev, &speed);
3150
3151         if (ret == -EOPNOTSUPP) {
3152                 hdev->support_sfp_query = false;
3153                 return ret;
3154         } else if (ret) {
3155                 return ret;
3156         }
3157
3158         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3159                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3160                         hclge_update_port_capability(hdev, mac);
3161                         return 0;
3162                 }
3163                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3164                                                HCLGE_MAC_FULL);
3165         } else {
3166                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3167                         return 0; /* do nothing if no SFP */
3168
3169                 /* must config full duplex for SFP */
3170                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3171         }
3172 }
3173
3174 static int hclge_get_status(struct hnae3_handle *handle)
3175 {
3176         struct hclge_vport *vport = hclge_get_vport(handle);
3177         struct hclge_dev *hdev = vport->back;
3178
3179         hclge_update_link_status(hdev);
3180
3181         return hdev->hw.mac.link;
3182 }
3183
3184 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3185 {
3186         if (!pci_num_vf(hdev->pdev)) {
3187                 dev_err(&hdev->pdev->dev,
3188                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3189                 return NULL;
3190         }
3191
3192         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3193                 dev_err(&hdev->pdev->dev,
3194                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3195                         vf, pci_num_vf(hdev->pdev));
3196                 return NULL;
3197         }
3198
3199         /* VF starts from 1 in vport */
3200         vf += HCLGE_VF_VPORT_START_NUM;
3201         return &hdev->vport[vf];
3202 }
3203
3204 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3205                                struct ifla_vf_info *ivf)
3206 {
3207         struct hclge_vport *vport = hclge_get_vport(handle);
3208         struct hclge_dev *hdev = vport->back;
3209
3210         vport = hclge_get_vf_vport(hdev, vf);
3211         if (!vport)
3212                 return -EINVAL;
3213
3214         ivf->vf = vf;
3215         ivf->linkstate = vport->vf_info.link_state;
3216         ivf->spoofchk = vport->vf_info.spoofchk;
3217         ivf->trusted = vport->vf_info.trusted;
3218         ivf->min_tx_rate = 0;
3219         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3220         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3221         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3222         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3223         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3224
3225         return 0;
3226 }
3227
3228 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3229                                    int link_state)
3230 {
3231         struct hclge_vport *vport = hclge_get_vport(handle);
3232         struct hclge_dev *hdev = vport->back;
3233
3234         vport = hclge_get_vf_vport(hdev, vf);
3235         if (!vport)
3236                 return -EINVAL;
3237
3238         vport->vf_info.link_state = link_state;
3239
3240         return 0;
3241 }
3242
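/* hclge_check_event_cause - decode the vector0 interrupt source. Events are
 * checked in priority order: IMP reset, then global reset, then MSI-X
 * (hardware error) events, then the mailbox (CMDQ RX) event; anything else
 * is reported as an "other" event. *clearval is set to the bits the caller
 * should write back to clear the cause.
 */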
3243 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3244 {
3245         u32 cmdq_src_reg, msix_src_reg;
3246
3247         /* fetch the events from their corresponding regs */
3248         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3249         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3250
3251         /* Assumption: If by any chance reset and mailbox events are reported
3252          * together then we will only process the reset event in this go and
3253          * will defer the processing of the mailbox events. Since we would not
3254          * have cleared the RX CMDQ event this time, we would receive another
3255          * interrupt from H/W just for the mailbox.
3256          *
3257          * check for vector0 reset event sources
3258          */
3259         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3260                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3261                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3262                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3263                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3264                 hdev->rst_stats.imp_rst_cnt++;
3265                 return HCLGE_VECTOR0_EVENT_RST;
3266         }
3267
3268         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3269                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3270                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3271                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3272                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3273                 hdev->rst_stats.global_rst_cnt++;
3274                 return HCLGE_VECTOR0_EVENT_RST;
3275         }
3276
3277         /* check for vector0 msix event source */
3278         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3279                 *clearval = msix_src_reg;
3280                 return HCLGE_VECTOR0_EVENT_ERR;
3281         }
3282
3283         /* check for vector0 mailbox(=CMDQ RX) event source */
3284         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3285                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3286                 *clearval = cmdq_src_reg;
3287                 return HCLGE_VECTOR0_EVENT_MBX;
3288         }
3289
3290         /* print other vector0 event source */
3291         dev_info(&hdev->pdev->dev,
3292                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3293                  cmdq_src_reg, msix_src_reg);
3294         *clearval = msix_src_reg;
3295
3296         return HCLGE_VECTOR0_EVENT_OTHER;
3297 }
3298
3299 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3300                                     u32 regclr)
3301 {
3302         switch (event_type) {
3303         case HCLGE_VECTOR0_EVENT_RST:
3304                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3305                 break;
3306         case HCLGE_VECTOR0_EVENT_MBX:
3307                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3308                 break;
3309         default:
3310                 break;
3311         }
3312 }
3313
3314 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3315 {
3316         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3317                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3318                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3319                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3320         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3321 }
3322
3323 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3324 {
3325         writel(enable ? 1 : 0, vector->addr);
3326 }
3327
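/* hclge_misc_irq_handle - vector0 interrupt handler. It masks the misc
 * vector, decodes the event cause, schedules the reset or mailbox service
 * task accordingly, clears the cause, and re-enables the vector only for
 * mailbox events or when no cause bits were read (reset paths re-enable it
 * later).
 */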
3328 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3329 {
3330         struct hclge_dev *hdev = data;
3331         u32 clearval = 0;
3332         u32 event_cause;
3333
3334         hclge_enable_vector(&hdev->misc_vector, false);
3335         event_cause = hclge_check_event_cause(hdev, &clearval);
3336
3337         /* vector 0 interrupt is shared with reset and mailbox source events. */
3338         switch (event_cause) {
3339         case HCLGE_VECTOR0_EVENT_ERR:
3340                 /* we do not know what type of reset is required now. This could
3341                  * only be decided after we fetch the type of errors which
3342                  * caused this event. Therefore, we will do the following for now:
3343                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3344                  *    have deferred the type of reset to be used.
3345                  * 2. Schedule the reset service task.
3346                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3347                  *    will fetch the correct type of reset. This would be done
3348                  *    by first decoding the types of errors.
3349                  */
3350                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3351                 fallthrough;
3352         case HCLGE_VECTOR0_EVENT_RST:
3353                 hclge_reset_task_schedule(hdev);
3354                 break;
3355         case HCLGE_VECTOR0_EVENT_MBX:
3356                 /* If we are here then,
3357                  * 1. Either we are not handling any mbx task and we are not
3358                  *    scheduled as well
3359                  *                        OR
3360                  * 2. We could be handling an mbx task but nothing more is
3361                  *    scheduled.
3362                  * In both cases, we should schedule the mbx task as there are
3363                  * more mbx messages reported by this interrupt.
3364                  */
3365                 hclge_mbx_task_schedule(hdev);
3366                 break;
3367         default:
3368                 dev_warn(&hdev->pdev->dev,
3369                          "received unknown or unhandled event of vector0\n");
3370                 break;
3371         }
3372
3373         hclge_clear_event_cause(hdev, event_cause, clearval);
3374
3375         /* Enable the interrupt if it is not caused by reset. And when
3376          * clearval is equal to 0, it means the interrupt status may be
3377          * cleared by hardware before the driver reads the status register.
3378          * For this case, the vector0 interrupt also should be enabled.
3379          */
3380         if (!clearval ||
3381             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3382                 hclge_enable_vector(&hdev->misc_vector, true);
3383         }
3384
3385         return IRQ_HANDLED;
3386 }
3387
3388 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3389 {
3390         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3391                 dev_warn(&hdev->pdev->dev,
3392                          "vector(vector_id %d) has been freed.\n", vector_id);
3393                 return;
3394         }
3395
3396         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3397         hdev->num_msi_left += 1;
3398         hdev->num_msi_used -= 1;
3399 }
3400
3401 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3402 {
3403         struct hclge_misc_vector *vector = &hdev->misc_vector;
3404
3405         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3406
3407         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3408         hdev->vector_status[0] = 0;
3409
3410         hdev->num_msi_left -= 1;
3411         hdev->num_msi_used += 1;
3412 }
3413
3414 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3415                                       const cpumask_t *mask)
3416 {
3417         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3418                                               affinity_notify);
3419
3420         cpumask_copy(&hdev->affinity_mask, mask);
3421 }
3422
3423 static void hclge_irq_affinity_release(struct kref *ref)
3424 {
3425 }
3426
3427 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3428 {
3429         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3430                               &hdev->affinity_mask);
3431
3432         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3433         hdev->affinity_notify.release = hclge_irq_affinity_release;
3434         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3435                                   &hdev->affinity_notify);
3436 }
3437
3438 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3439 {
3440         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3441         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3442 }
3443
3444 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3445 {
3446         int ret;
3447
3448         hclge_get_misc_vector(hdev);
3449
3450         /* this would be explicitly freed in the end */
3451         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3452                  HCLGE_NAME, pci_name(hdev->pdev));
3453         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3454                           0, hdev->misc_vector.name, hdev);
3455         if (ret) {
3456                 hclge_free_vector(hdev, 0);
3457                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3458                         hdev->misc_vector.vector_irq);
3459         }
3460
3461         return ret;
3462 }
3463
3464 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3465 {
3466         free_irq(hdev->misc_vector.vector_irq, hdev);
3467         hclge_free_vector(hdev, 0);
3468 }
3469
3470 int hclge_notify_client(struct hclge_dev *hdev,
3471                         enum hnae3_reset_notify_type type)
3472 {
3473         struct hnae3_client *client = hdev->nic_client;
3474         u16 i;
3475
3476         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3477                 return 0;
3478
3479         if (!client->ops->reset_notify)
3480                 return -EOPNOTSUPP;
3481
3482         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3483                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3484                 int ret;
3485
3486                 ret = client->ops->reset_notify(handle, type);
3487                 if (ret) {
3488                         dev_err(&hdev->pdev->dev,
3489                                 "notify nic client failed %d(%d)\n", type, ret);
3490                         return ret;
3491                 }
3492         }
3493
3494         return 0;
3495 }
3496
3497 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3498                                     enum hnae3_reset_notify_type type)
3499 {
3500         struct hnae3_client *client = hdev->roce_client;
3501         int ret;
3502         u16 i;
3503
3504         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3505                 return 0;
3506
3507         if (!client->ops->reset_notify)
3508                 return -EOPNOTSUPP;
3509
3510         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3511                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3512
3513                 ret = client->ops->reset_notify(handle, type);
3514                 if (ret) {
3515                         dev_err(&hdev->pdev->dev,
3516                                 "notify roce client failed %d(%d)",
3517                                 type, ret);
3518                         return ret;
3519                 }
3520         }
3521
3522         return ret;
3523 }
3524
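/* hclge_reset_wait - poll the hardware reset status register for the
 * current reset type (IMP, global or function reset) every
 * HCLGE_RESET_WATI_MS until the reset bit clears, giving up with -EBUSY
 * after HCLGE_RESET_WAIT_CNT attempts.
 */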
3525 static int hclge_reset_wait(struct hclge_dev *hdev)
3526 {
3527 #define HCLGE_RESET_WATI_MS     100
3528 #define HCLGE_RESET_WAIT_CNT    350
3529
3530         u32 val, reg, reg_bit;
3531         u32 cnt = 0;
3532
3533         switch (hdev->reset_type) {
3534         case HNAE3_IMP_RESET:
3535                 reg = HCLGE_GLOBAL_RESET_REG;
3536                 reg_bit = HCLGE_IMP_RESET_BIT;
3537                 break;
3538         case HNAE3_GLOBAL_RESET:
3539                 reg = HCLGE_GLOBAL_RESET_REG;
3540                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3541                 break;
3542         case HNAE3_FUNC_RESET:
3543                 reg = HCLGE_FUN_RST_ING;
3544                 reg_bit = HCLGE_FUN_RST_ING_B;
3545                 break;
3546         default:
3547                 dev_err(&hdev->pdev->dev,
3548                         "Wait for unsupported reset type: %d\n",
3549                         hdev->reset_type);
3550                 return -EINVAL;
3551         }
3552
3553         val = hclge_read_dev(&hdev->hw, reg);
3554         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3555                 msleep(HCLGE_RESET_WATI_MS);
3556                 val = hclge_read_dev(&hdev->hw, reg);
3557                 cnt++;
3558         }
3559
3560         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3561                 dev_warn(&hdev->pdev->dev,
3562                          "Wait for reset timeout: %d\n", hdev->reset_type);
3563                 return -EBUSY;
3564         }
3565
3566         return 0;
3567 }
3568
3569 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3570 {
3571         struct hclge_vf_rst_cmd *req;
3572         struct hclge_desc desc;
3573
3574         req = (struct hclge_vf_rst_cmd *)desc.data;
3575         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3576         req->dest_vfid = func_id;
3577
3578         if (reset)
3579                 req->vf_rst = 0x1;
3580
3581         return hclge_cmd_send(&hdev->hw, &desc, 1);
3582 }
3583
3584 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3585 {
3586         int i;
3587
3588         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3589                 struct hclge_vport *vport = &hdev->vport[i];
3590                 int ret;
3591
3592                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3593                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3594                 if (ret) {
3595                         dev_err(&hdev->pdev->dev,
3596                                 "set vf(%u) rst failed %d!\n",
3597                                 vport->vport_id, ret);
3598                         return ret;
3599                 }
3600
3601                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3602                         continue;
3603
3604                 /* Inform VF to process the reset.
3605                  * hclge_inform_reset_assert_to_vf may fail if VF
3606                  * driver is not loaded.
3607                  */
3608                 ret = hclge_inform_reset_assert_to_vf(vport);
3609                 if (ret)
3610                         dev_warn(&hdev->pdev->dev,
3611                                  "inform reset to vf(%u) failed %d!\n",
3612                                  vport->vport_id, ret);
3613         }
3614
3615         return 0;
3616 }
3617
3618 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3619 {
3620         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3621             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3622             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3623                 return;
3624
3625         hclge_mbx_handler(hdev);
3626
3627         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3628 }
3629
3630 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3631 {
3632         struct hclge_pf_rst_sync_cmd *req;
3633         struct hclge_desc desc;
3634         int cnt = 0;
3635         int ret;
3636
3637         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3638         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3639
3640         do {
3641                 /* vf needs to down netdev by mbx during PF or FLR reset */
3642                 hclge_mailbox_service_task(hdev);
3643
3644                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3645                 /* for compatibility with old firmware, wait
3646                  * 100 ms for VF to stop IO
3647                  */
3648                 if (ret == -EOPNOTSUPP) {
3649                         msleep(HCLGE_RESET_SYNC_TIME);
3650                         return;
3651                 } else if (ret) {
3652                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3653                                  ret);
3654                         return;
3655                 } else if (req->all_vf_ready) {
3656                         return;
3657                 }
3658                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3659                 hclge_cmd_reuse_desc(&desc, true);
3660         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3661
3662         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3663 }
3664
3665 void hclge_report_hw_error(struct hclge_dev *hdev,
3666                            enum hnae3_hw_error_type type)
3667 {
3668         struct hnae3_client *client = hdev->nic_client;
3669         u16 i;
3670
3671         if (!client || !client->ops->process_hw_error ||
3672             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3673                 return;
3674
3675         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3676                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3677 }
3678
3679 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3680 {
3681         u32 reg_val;
3682
3683         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3684         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3685                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3686                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3687                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3688         }
3689
3690         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3691                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3692                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3693                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3694         }
3695 }
3696
3697 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3698 {
3699         struct hclge_desc desc;
3700         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3701         int ret;
3702
3703         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3704         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3705         req->fun_reset_vfid = func_id;
3706
3707         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3708         if (ret)
3709                 dev_err(&hdev->pdev->dev,
3710                         "send function reset cmd fail, status =%d\n", ret);
3711
3712         return ret;
3713 }
3714
3715 static void hclge_do_reset(struct hclge_dev *hdev)
3716 {
3717         struct hnae3_handle *handle = &hdev->vport[0].nic;
3718         struct pci_dev *pdev = hdev->pdev;
3719         u32 val;
3720
3721         if (hclge_get_hw_reset_stat(handle)) {
3722                 dev_info(&pdev->dev, "hardware reset not finish\n");
3723                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3724                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3725                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3726                 return;
3727         }
3728
3729         switch (hdev->reset_type) {
3730         case HNAE3_GLOBAL_RESET:
3731                 dev_info(&pdev->dev, "global reset requested\n");
3732                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3733                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3734                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3735                 break;
3736         case HNAE3_FUNC_RESET:
3737                 dev_info(&pdev->dev, "PF reset requested\n");
3738                 /* schedule again to check later */
3739                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3740                 hclge_reset_task_schedule(hdev);
3741                 break;
3742         default:
3743                 dev_warn(&pdev->dev,
3744                          "unsupported reset type: %d\n", hdev->reset_type);
3745                 break;
3746         }
3747 }
3748
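/* hclge_get_reset_level - pick the highest priority reset pending in *addr.
 * An UNKNOWN reset is first resolved into a concrete type by decoding the
 * MSI-X error sources. Priority order is IMP > global > function > FLR,
 * and lower priority bits are also cleared when a higher one is chosen.
 * If a higher level reset is already in progress, HNAE3_NONE_RESET is
 * returned so that the lower request is deferred.
 */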
3749 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3750                                                    unsigned long *addr)
3751 {
3752         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3753         struct hclge_dev *hdev = ae_dev->priv;
3754
3755         /* first, resolve any unknown reset type to the known type(s) */
3756         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3757                 u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3758                                         HCLGE_MISC_VECTOR_INT_STS);
3759                 /* we will intentionally ignore any errors from this function
3760                  *  as we will end up in *some* reset request in any case
3761                  */
3762                 if (hclge_handle_hw_msix_error(hdev, addr))
3763                         dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3764                                  msix_sts_reg);
3765
3766                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3767                 /* We deferred the clearing of the error event which caused
3768                  * the interrupt since it was not possible to do that in
3769                  * interrupt context (and this is the reason we introduced a
3770                  * new UNKNOWN reset type). Now that the errors have been
3771                  * handled and cleared in hardware, we can safely enable
3772                  * interrupts. This is an exception to the norm.
3773                  */
3774                 hclge_enable_vector(&hdev->misc_vector, true);
3775         }
3776
3777         /* return the highest priority reset level amongst all */
3778         if (test_bit(HNAE3_IMP_RESET, addr)) {
3779                 rst_level = HNAE3_IMP_RESET;
3780                 clear_bit(HNAE3_IMP_RESET, addr);
3781                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3782                 clear_bit(HNAE3_FUNC_RESET, addr);
3783         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3784                 rst_level = HNAE3_GLOBAL_RESET;
3785                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3786                 clear_bit(HNAE3_FUNC_RESET, addr);
3787         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3788                 rst_level = HNAE3_FUNC_RESET;
3789                 clear_bit(HNAE3_FUNC_RESET, addr);
3790         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3791                 rst_level = HNAE3_FLR_RESET;
3792                 clear_bit(HNAE3_FLR_RESET, addr);
3793         }
3794
3795         if (hdev->reset_type != HNAE3_NONE_RESET &&
3796             rst_level < hdev->reset_type)
3797                 return HNAE3_NONE_RESET;
3798
3799         return rst_level;
3800 }
3801
3802 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3803 {
3804         u32 clearval = 0;
3805
3806         switch (hdev->reset_type) {
3807         case HNAE3_IMP_RESET:
3808                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3809                 break;
3810         case HNAE3_GLOBAL_RESET:
3811                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3812                 break;
3813         default:
3814                 break;
3815         }
3816
3817         if (!clearval)
3818                 return;
3819
3820         /* For revision 0x20, the reset interrupt source
3821          * can only be cleared after the hardware reset is done
3822          */
3823         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3824                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3825                                 clearval);
3826
3827         hclge_enable_vector(&hdev->misc_vector, true);
3828 }
3829
3830 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3831 {
3832         u32 reg_val;
3833
3834         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3835         if (enable)
3836                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3837         else
3838                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3839
3840         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3841 }
3842
3843 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3844 {
3845         int ret;
3846
3847         ret = hclge_set_all_vf_rst(hdev, true);
3848         if (ret)
3849                 return ret;
3850
3851         hclge_func_reset_sync_vf(hdev);
3852
3853         return 0;
3854 }
3855
3856 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3857 {
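/* hclge_reset_prepare_wait - per reset type preparation before waiting for
 * hardware: for a function reset, notify and sync the VFs, send the reset
 * trigger command and disable the command queue; for FLR, only notify the
 * VFs; for an IMP reset, handle pending IMP errors and set the IMP reset
 * bit in HCLGE_PF_OTHER_INT_REG. Finally the driver waits briefly and
 * signals through the reset handshake that preparatory work is done.
 */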
3858         u32 reg_val;
3859         int ret = 0;
3860
3861         switch (hdev->reset_type) {
3862         case HNAE3_FUNC_RESET:
3863                 ret = hclge_func_reset_notify_vf(hdev);
3864                 if (ret)
3865                         return ret;
3866
3867                 ret = hclge_func_reset_cmd(hdev, 0);
3868                 if (ret) {
3869                         dev_err(&hdev->pdev->dev,
3870                                 "asserting function reset fail %d!\n", ret);
3871                         return ret;
3872                 }
3873
3874                 /* After performing pf reset, it is not necessary to do the
3875                  * mailbox handling or send any command to firmware, because
3876                  * any mailbox handling or command to firmware is only valid
3877                  * after hclge_cmd_init is called.
3878                  */
3879                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3880                 hdev->rst_stats.pf_rst_cnt++;
3881                 break;
3882         case HNAE3_FLR_RESET:
3883                 ret = hclge_func_reset_notify_vf(hdev);
3884                 if (ret)
3885                         return ret;
3886                 break;
3887         case HNAE3_IMP_RESET:
3888                 hclge_handle_imp_error(hdev);
3889                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3890                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3891                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3892                 break;
3893         default:
3894                 break;
3895         }
3896
3897         /* inform hardware that preparatory work is done */
3898         msleep(HCLGE_RESET_SYNC_TIME);
3899         hclge_reset_handshake(hdev, true);
3900         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3901
3902         return ret;
3903 }
3904
3905 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3906 {
3907 #define MAX_RESET_FAIL_CNT 5
3908
3909         if (hdev->reset_pending) {
3910                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3911                          hdev->reset_pending);
3912                 return true;
3913         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3914                    HCLGE_RESET_INT_M) {
3915                 dev_info(&hdev->pdev->dev,
3916                          "reset failed because new reset interrupt\n");
3917                 hclge_clear_reset_cause(hdev);
3918                 return false;
3919         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3920                 hdev->rst_stats.reset_fail_cnt++;
3921                 set_bit(hdev->reset_type, &hdev->reset_pending);
3922                 dev_info(&hdev->pdev->dev,
3923                          "re-schedule reset task(%u)\n",
3924                          hdev->rst_stats.reset_fail_cnt);
3925                 return true;
3926         }
3927
3928         hclge_clear_reset_cause(hdev);
3929
3930         /* recover the handshake status when reset fails */
3931         hclge_reset_handshake(hdev, true);
3932
3933         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3934
3935         hclge_dbg_dump_rst_info(hdev);
3936
3937         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3938
3939         return false;
3940 }
3941
3942 static int hclge_set_rst_done(struct hclge_dev *hdev)
3943 {
3944         struct hclge_pf_rst_done_cmd *req;
3945         struct hclge_desc desc;
3946         int ret;
3947
3948         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3949         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3950         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3951
3952         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3953         /* To be compatible with the old firmware, which does not support
3954          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3955          * return success
3956          */
3957         if (ret == -EOPNOTSUPP) {
3958                 dev_warn(&hdev->pdev->dev,
3959                          "current firmware does not support command(0x%x)!\n",
3960                          HCLGE_OPC_PF_RST_DONE);
3961                 return 0;
3962         } else if (ret) {
3963                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3964                         ret);
3965         }
3966
3967         return ret;
3968 }
3969
3970 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3971 {
3972         int ret = 0;
3973
3974         switch (hdev->reset_type) {
3975         case HNAE3_FUNC_RESET:
3976         case HNAE3_FLR_RESET:
3977                 ret = hclge_set_all_vf_rst(hdev, false);
3978                 break;
3979         case HNAE3_GLOBAL_RESET:
3980         case HNAE3_IMP_RESET:
3981                 ret = hclge_set_rst_done(hdev);
3982                 break;
3983         default:
3984                 break;
3985         }
3986
3987         /* clear up the handshake status after re-initialization is done */
3988         hclge_reset_handshake(hdev, false);
3989
3990         return ret;
3991 }
3992
3993 static int hclge_reset_stack(struct hclge_dev *hdev)
3994 {
3995         int ret;
3996
3997         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3998         if (ret)
3999                 return ret;
4000
4001         ret = hclge_reset_ae_dev(hdev->ae_dev);
4002         if (ret)
4003                 return ret;
4004
4005         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4006 }
4007
4008 static int hclge_reset_prepare(struct hclge_dev *hdev)
4009 {
4010         int ret;
4011
4012         hdev->rst_stats.reset_cnt++;
4013         /* perform reset of the stack & ae device for a client */
4014         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4015         if (ret)
4016                 return ret;
4017
4018         rtnl_lock();
4019         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4020         rtnl_unlock();
4021         if (ret)
4022                 return ret;
4023
4024         return hclge_reset_prepare_wait(hdev);
4025 }
4026
4027 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4028 {
4029         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4030         enum hnae3_reset_type reset_level;
4031         int ret;
4032
4033         hdev->rst_stats.hw_reset_done_cnt++;
4034
4035         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4036         if (ret)
4037                 return ret;
4038
4039         rtnl_lock();
4040         ret = hclge_reset_stack(hdev);
4041         rtnl_unlock();
4042         if (ret)
4043                 return ret;
4044
4045         hclge_clear_reset_cause(hdev);
4046
4047         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4048         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4049          * times
4050          */
4051         if (ret &&
4052             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4053                 return ret;
4054
4055         ret = hclge_reset_prepare_up(hdev);
4056         if (ret)
4057                 return ret;
4058
4059         rtnl_lock();
4060         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4061         rtnl_unlock();
4062         if (ret)
4063                 return ret;
4064
4065         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4066         if (ret)
4067                 return ret;
4068
4069         hdev->last_reset_time = jiffies;
4070         hdev->rst_stats.reset_fail_cnt = 0;
4071         hdev->rst_stats.reset_done_cnt++;
4072         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4073
4074         /* if default_reset_request has a higher level reset request,
4075          * it should be handled as soon as possible, since some errors
4076          * need this kind of reset to be fixed.
4077          */
4078         reset_level = hclge_get_reset_level(ae_dev,
4079                                             &hdev->default_reset_request);
4080         if (reset_level != HNAE3_NONE_RESET)
4081                 set_bit(reset_level, &hdev->reset_request);
4082
4083         return 0;
4084 }
4085
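/* hclge_reset - run a full reset cycle in three phases: prepare (notify
 * clients and assert the reset), wait for hardware to finish, and rebuild
 * (re-init the ae device and bring clients back up). If any phase fails,
 * hclge_reset_err_handle decides whether the reset task should be
 * re-scheduled to retry.
 */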
4086 static void hclge_reset(struct hclge_dev *hdev)
4087 {
4088         if (hclge_reset_prepare(hdev))
4089                 goto err_reset;
4090
4091         if (hclge_reset_wait(hdev))
4092                 goto err_reset;
4093
4094         if (hclge_reset_rebuild(hdev))
4095                 goto err_reset;
4096
4097         return;
4098
4099 err_reset:
4100         if (hclge_reset_err_handle(hdev))
4101                 hclge_reset_task_schedule(hdev);
4102 }
4103
4104 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4105 {
4106         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4107         struct hclge_dev *hdev = ae_dev->priv;
4108
4109         /* We might end up getting called broadly because of the 2 cases below:
4110          * 1. A recoverable error was conveyed through APEI and the only way
4111          *    to bring back normalcy is to reset.
4112          * 2. A new reset request from the stack due to timeout
4113          *
4114          * For the first case, the error event might not have an ae handle
4115          * available. Check if this is a new reset request and we are not here
4116          * just because the last reset attempt did not succeed and the watchdog
4117          * hit us again. We will know this if the last reset request did not
4118          * occur very recently (watchdog timer = 5*HZ, let us check after a
4119          * sufficiently large time, say 4*5*HZ). In case of a new request we
4120          * reset the "reset level" to PF reset. If it is a repeat of the most
4121          * recent reset request then we want to make sure we throttle the
4122          * reset request. Therefore, we will not allow it again before 3*HZ.
4123          */
4124         if (!handle)
4125                 handle = &hdev->vport[0].nic;
4126
4127         if (time_before(jiffies, (hdev->last_reset_time +
4128                                   HCLGE_RESET_INTERVAL))) {
4129                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4130                 return;
4131         } else if (hdev->default_reset_request) {
4132                 hdev->reset_level =
4133                         hclge_get_reset_level(ae_dev,
4134                                               &hdev->default_reset_request);
4135         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4136                 hdev->reset_level = HNAE3_FUNC_RESET;
4137         }
4138
4139         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4140                  hdev->reset_level);
4141
4142         /* request reset & schedule reset task */
4143         set_bit(hdev->reset_level, &hdev->reset_request);
4144         hclge_reset_task_schedule(hdev);
4145
4146         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4147                 hdev->reset_level++;
4148 }
4149
4150 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4151                                         enum hnae3_reset_type rst_type)
4152 {
4153         struct hclge_dev *hdev = ae_dev->priv;
4154
4155         set_bit(rst_type, &hdev->default_reset_request);
4156 }
4157
4158 static void hclge_reset_timer(struct timer_list *t)
4159 {
4160         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4161
4162         /* if default_reset_request has no value, it means that this reset
4163          * request has already been handled, so just return here
4164          */
4165         if (!hdev->default_reset_request)
4166                 return;
4167
4168         dev_info(&hdev->pdev->dev,
4169                  "triggering reset in reset timer\n");
4170         hclge_reset_event(hdev->pdev, NULL);
4171 }
4172
4173 static void hclge_reset_subtask(struct hclge_dev *hdev)
4174 {
4175         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4176
4177         /* Check if there is any ongoing reset in the hardware. This status can
4178          * be checked from reset_pending. If there is, we need to wait for the
4179          * hardware to complete the reset.
4180          *    a. If we are able to figure out in a reasonable time that the
4181          *       hardware has been fully reset, we can proceed with the driver
4182          *       and client reset.
4183          *    b. Else, we can come back later to check this status, so
4184          *       re-schedule now.
4185          */
4186         hdev->last_reset_time = jiffies;
4187         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4188         if (hdev->reset_type != HNAE3_NONE_RESET)
4189                 hclge_reset(hdev);
4190
4191         /* check if we got any *new* reset requests to be honored */
4192         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4193         if (hdev->reset_type != HNAE3_NONE_RESET)
4194                 hclge_do_reset(hdev);
4195
4196         hdev->reset_type = HNAE3_NONE_RESET;
4197 }
4198
4199 static void hclge_reset_service_task(struct hclge_dev *hdev)
4200 {
4201         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4202                 return;
4203
4204         down(&hdev->reset_sem);
4205         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4206
4207         hclge_reset_subtask(hdev);
4208
4209         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4210         up(&hdev->reset_sem);
4211 }
4212
4213 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4214 {
4215         int i;
4216
4217         /* start from vport 1, since vport 0 (the PF) is always alive */
4218         for (i = 1; i < hdev->num_alloc_vport; i++) {
4219                 struct hclge_vport *vport = &hdev->vport[i];
4220
4221                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4222                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4223
4224                 /* If the VF is not alive, reset its MPS to the default value */
4225                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4226                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4227         }
4228 }
4229
4230 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4231 {
4232         unsigned long delta = round_jiffies_relative(HZ);
4233
4234         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4235                 return;
4236
4237         /* Always handle the link status update to make sure the link state
4238          * is updated when it is triggered by mbx.
4239          */
4240         hclge_update_link_status(hdev);
4241         hclge_sync_mac_table(hdev);
4242         hclge_sync_promisc_mode(hdev);
4243
4244         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4245                 delta = jiffies - hdev->last_serv_processed;
4246
4247                 if (delta < round_jiffies_relative(HZ)) {
4248                         delta = round_jiffies_relative(HZ) - delta;
4249                         goto out;
4250                 }
4251         }
4252
4253         hdev->serv_processed_cnt++;
4254         hclge_update_vport_alive(hdev);
4255
4256         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4257                 hdev->last_serv_processed = jiffies;
4258                 goto out;
4259         }
4260
4261         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4262                 hclge_update_stats_for_all(hdev);
4263
4264         hclge_update_port_info(hdev);
4265         hclge_sync_vlan_filter(hdev);
4266
4267         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4268                 hclge_rfs_filter_expire(hdev);
4269
4270         hdev->last_serv_processed = jiffies;
4271
4272 out:
4273         hclge_task_schedule(hdev, delta);
4274 }
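/* Illustrative sketch only, not driver code: hclge_periodic_service_task()
 * aims to run the heavy work about once per second. If it was woken early
 * (e.g. by a mailbox or reset event sharing the work item), it only
 * reschedules itself for the remainder of the period. Plain milliseconds
 * are used here instead of jiffies.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define EX_PERIOD_MS	1000U	/* assumed service period (~HZ) */

/* Returns the delay until the next run; *do_work tells whether the heavy
 * periodic work should run now.
 */
static uint32_t ex_next_delay(uint64_t now_ms, uint64_t last_ms, bool *do_work)
{
	uint64_t elapsed = now_ms - last_ms;

	if (elapsed < EX_PERIOD_MS) {
		*do_work = false;
		return (uint32_t)(EX_PERIOD_MS - elapsed); /* finish the period */
	}

	*do_work = true;
	return EX_PERIOD_MS;	/* full period until the next run */
}
#endif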
4275
4276 static void hclge_service_task(struct work_struct *work)
4277 {
4278         struct hclge_dev *hdev =
4279                 container_of(work, struct hclge_dev, service_task.work);
4280
4281         hclge_reset_service_task(hdev);
4282         hclge_mailbox_service_task(hdev);
4283         hclge_periodic_service_task(hdev);
4284
4285         /* Handle reset and mbx again in case the periodic task delays the
4286          * handling by calling hclge_task_schedule() in
4287          * hclge_periodic_service_task().
4288          */
4289         hclge_reset_service_task(hdev);
4290         hclge_mailbox_service_task(hdev);
4291 }
4292
4293 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4294 {
4295         /* VF handle has no client */
4296         if (!handle->client)
4297                 return container_of(handle, struct hclge_vport, nic);
4298         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4299                 return container_of(handle, struct hclge_vport, roce);
4300         else
4301                 return container_of(handle, struct hclge_vport, nic);
4302 }
4303
4304 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4305                                   struct hnae3_vector_info *vector_info)
4306 {
4307 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4308
4309         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4310
4311         /* an extended offset is needed to configure vectors >= 64 */
4312         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4313                 vector_info->io_addr = hdev->hw.io_base +
4314                                 HCLGE_VECTOR_REG_BASE +
4315                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4316         else
4317                 vector_info->io_addr = hdev->hw.io_base +
4318                                 HCLGE_VECTOR_EXT_REG_BASE +
4319                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4320                                 HCLGE_VECTOR_REG_OFFSET_H +
4321                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4322                                 HCLGE_VECTOR_REG_OFFSET;
4323
4324         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4325         hdev->vector_irq[idx] = vector_info->vector;
4326 }
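/* Illustrative sketch only, not driver code: how the interrupt control
 * register address is derived once the vector index exceeds 64, as done in
 * hclge_get_vector_info(). The bases and strides below are hypothetical
 * stand-ins for HCLGE_VECTOR_REG_BASE, HCLGE_VECTOR_EXT_REG_BASE,
 * HCLGE_VECTOR_REG_OFFSET and HCLGE_VECTOR_REG_OFFSET_H.
 */
#if 0
#include <stdint.h>

#define EX_VEC_BASE		0x20400U	/* assumed base, vectors 1..64 */
#define EX_VEC_EXT_BASE		0x30000U	/* assumed extended base */
#define EX_VEC_STRIDE		0x4U		/* assumed low-part stride */
#define EX_VEC_STRIDE_H		0x1000U		/* assumed high-part stride */
#define EX_VEC_PER_GROUP	64U

static uint32_t ex_vector_reg_offset(uint16_t idx)
{
	uint16_t n = idx - 1;	/* vector 0 is not used for NIC queues */

	if (n < EX_VEC_PER_GROUP)
		return EX_VEC_BASE + n * EX_VEC_STRIDE;

	return EX_VEC_EXT_BASE +
	       n / EX_VEC_PER_GROUP * EX_VEC_STRIDE_H +
	       n % EX_VEC_PER_GROUP * EX_VEC_STRIDE;
}
#endif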
4327
4328 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4329                             struct hnae3_vector_info *vector_info)
4330 {
4331         struct hclge_vport *vport = hclge_get_vport(handle);
4332         struct hnae3_vector_info *vector = vector_info;
4333         struct hclge_dev *hdev = vport->back;
4334         int alloc = 0;
4335         u16 i = 0;
4336         u16 j;
4337
4338         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4339         vector_num = min(hdev->num_msi_left, vector_num);
4340
4341         for (j = 0; j < vector_num; j++) {
4342                 while (++i < hdev->num_nic_msi) {
4343                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4344                                 hclge_get_vector_info(hdev, i, vector);
4345                                 vector++;
4346                                 alloc++;
4347
4348                                 break;
4349                         }
4350                 }
4351         }
4352         hdev->num_msi_left -= alloc;
4353         hdev->num_msi_used += alloc;
4354
4355         return alloc;
4356 }
4357
4358 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4359 {
4360         int i;
4361
4362         for (i = 0; i < hdev->num_msi; i++)
4363                 if (vector == hdev->vector_irq[i])
4364                         return i;
4365
4366         return -EINVAL;
4367 }
4368
4369 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4370 {
4371         struct hclge_vport *vport = hclge_get_vport(handle);
4372         struct hclge_dev *hdev = vport->back;
4373         int vector_id;
4374
4375         vector_id = hclge_get_vector_index(hdev, vector);
4376         if (vector_id < 0) {
4377                 dev_err(&hdev->pdev->dev,
4378                         "Get vector index fail. vector = %d\n", vector);
4379                 return vector_id;
4380         }
4381
4382         hclge_free_vector(hdev, vector_id);
4383
4384         return 0;
4385 }
4386
4387 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4388 {
4389         return HCLGE_RSS_KEY_SIZE;
4390 }
4391
4392 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4393                                   const u8 hfunc, const u8 *key)
4394 {
4395         struct hclge_rss_config_cmd *req;
4396         unsigned int key_offset = 0;
4397         struct hclge_desc desc;
4398         int key_counts;
4399         int key_size;
4400         int ret;
4401
4402         key_counts = HCLGE_RSS_KEY_SIZE;
4403         req = (struct hclge_rss_config_cmd *)desc.data;
4404
4405         while (key_counts) {
4406                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4407                                            false);
4408
4409                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4410                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4411
4412                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4413                 memcpy(req->hash_key,
4414                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4415
4416                 key_counts -= key_size;
4417                 key_offset++;
4418                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4419                 if (ret) {
4420                         dev_err(&hdev->pdev->dev,
4421                                 "Configure RSS config fail, status = %d\n",
4422                                 ret);
4423                         return ret;
4424                 }
4425         }
4426         return 0;
4427 }
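/* Illustrative sketch only, not driver code: the RSS hash key is longer
 * than the key space of one command descriptor, so hclge_set_rss_algo_key()
 * writes it in chunks with an increasing key_offset. The sizes below assume
 * a 40-byte key and 16 key bytes per descriptor (HCLGE_RSS_KEY_SIZE and
 * HCLGE_RSS_HASH_KEY_NUM in the driver).
 */
#if 0
#include <stdio.h>
#include <string.h>

#define EX_KEY_SIZE	40
#define EX_KEY_PER_DESC	16

static void ex_write_key_chunks(const unsigned char *key)
{
	unsigned char desc_key[EX_KEY_PER_DESC];
	int remaining = EX_KEY_SIZE;
	int offset = 0;

	while (remaining) {
		int chunk = remaining < EX_KEY_PER_DESC ?
			    remaining : EX_KEY_PER_DESC;

		memcpy(desc_key, key + offset * EX_KEY_PER_DESC, chunk);
		printf("descriptor %d carries %d key bytes\n", offset, chunk);

		remaining -= chunk;
		offset++;	/* goes into hash_config as the key offset */
	}
}
#endif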
4428
4429 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4430 {
4431         struct hclge_rss_indirection_table_cmd *req;
4432         struct hclge_desc desc;
4433         int rss_cfg_tbl_num;
4434         u8 rss_msb_oft;
4435         u8 rss_msb_val;
4436         int ret;
4437         u16 qid;
4438         int i;
4439         u32 j;
4440
4441         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4442         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4443                           HCLGE_RSS_CFG_TBL_SIZE;
4444
4445         for (i = 0; i < rss_cfg_tbl_num; i++) {
4446                 hclge_cmd_setup_basic_desc
4447                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4448
4449                 req->start_table_index =
4450                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4451                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4452                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4453                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4454                         req->rss_qid_l[j] = qid & 0xff;
4455                         rss_msb_oft =
4456                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4457                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4458                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4459                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4460                 }
4461                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4462                 if (ret) {
4463                         dev_err(&hdev->pdev->dev,
4464                                 "Configure rss indir table fail, status = %d\n",
4465                                 ret);
4466                         return ret;
4467                 }
4468         }
4469         return 0;
4470 }
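/* Illustrative sketch only, not driver code: each queue id written by
 * hclge_set_rss_indir_table() is split into a low byte (rss_qid_l[j]) and
 * one most-significant bit packed into the rss_qid_h bit array. This
 * assumes queue ids of at most 9 bits, i.e. HCLGE_RSS_CFG_TBL_BW_L = 8 and
 * HCLGE_RSS_CFG_TBL_BW_H = 1.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define EX_TBL_SIZE	16	/* assumed entries per descriptor */

static void ex_pack_indir_entries(const uint16_t *qid,
				  uint8_t qid_l[EX_TBL_SIZE],
				  uint8_t qid_h[EX_TBL_SIZE / 8])
{
	unsigned int j;

	memset(qid_h, 0, EX_TBL_SIZE / 8);
	for (j = 0; j < EX_TBL_SIZE; j++) {
		qid_l[j] = qid[j] & 0xff;			  /* low 8 bits */
		qid_h[j / 8] |= ((qid[j] >> 8) & 0x1) << (j % 8); /* bit 8 */
	}
}
#endif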
4471
4472 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4473                                  u16 *tc_size, u16 *tc_offset)
4474 {
4475         struct hclge_rss_tc_mode_cmd *req;
4476         struct hclge_desc desc;
4477         int ret;
4478         int i;
4479
4480         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4481         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4482
4483         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4484                 u16 mode = 0;
4485
4486                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4487                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4488                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4489                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4490                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4491                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4492                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4493
4494                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4495         }
4496
4497         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4498         if (ret)
4499                 dev_err(&hdev->pdev->dev,
4500                         "Configure rss tc mode fail, status = %d\n", ret);
4501
4502         return ret;
4503 }
4504
4505 static void hclge_get_rss_type(struct hclge_vport *vport)
4506 {
4507         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4508             vport->rss_tuple_sets.ipv4_udp_en ||
4509             vport->rss_tuple_sets.ipv4_sctp_en ||
4510             vport->rss_tuple_sets.ipv6_tcp_en ||
4511             vport->rss_tuple_sets.ipv6_udp_en ||
4512             vport->rss_tuple_sets.ipv6_sctp_en)
4513                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4514         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4515                  vport->rss_tuple_sets.ipv6_fragment_en)
4516                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4517         else
4518                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4519 }
4520
4521 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4522 {
4523         struct hclge_rss_input_tuple_cmd *req;
4524         struct hclge_desc desc;
4525         int ret;
4526
4527         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4528
4529         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4530
4531         /* Get the tuple cfg from pf */
4532         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4533         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4534         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4535         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4536         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4537         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4538         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4539         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4540         hclge_get_rss_type(&hdev->vport[0]);
4541         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4542         if (ret)
4543                 dev_err(&hdev->pdev->dev,
4544                         "Configure rss input fail, status = %d\n", ret);
4545         return ret;
4546 }
4547
4548 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4549                          u8 *key, u8 *hfunc)
4550 {
4551         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4552         struct hclge_vport *vport = hclge_get_vport(handle);
4553         int i;
4554
4555         /* Get hash algorithm */
4556         if (hfunc) {
4557                 switch (vport->rss_algo) {
4558                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4559                         *hfunc = ETH_RSS_HASH_TOP;
4560                         break;
4561                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4562                         *hfunc = ETH_RSS_HASH_XOR;
4563                         break;
4564                 default:
4565                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4566                         break;
4567                 }
4568         }
4569
4570         /* Get the RSS Key required by the user */
4571         if (key)
4572                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4573
4574         /* Get the indirection table */
4575         if (indir)
4576                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4577                         indir[i] =  vport->rss_indirection_tbl[i];
4578
4579         return 0;
4580 }
4581
4582 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4583                          const  u8 *key, const  u8 hfunc)
4584 {
4585         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4586         struct hclge_vport *vport = hclge_get_vport(handle);
4587         struct hclge_dev *hdev = vport->back;
4588         u8 hash_algo;
4589         int ret, i;
4590
4591         /* Set the RSS Hash Key if specified by the user */
4592         if (key) {
4593                 switch (hfunc) {
4594                 case ETH_RSS_HASH_TOP:
4595                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4596                         break;
4597                 case ETH_RSS_HASH_XOR:
4598                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4599                         break;
4600                 case ETH_RSS_HASH_NO_CHANGE:
4601                         hash_algo = vport->rss_algo;
4602                         break;
4603                 default:
4604                         return -EINVAL;
4605                 }
4606
4607                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4608                 if (ret)
4609                         return ret;
4610
4611                 /* Update the shadow RSS key with the user specified key */
4612                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4613                 vport->rss_algo = hash_algo;
4614         }
4615
4616         /* Update the shadow RSS table with user specified qids */
4617         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4618                 vport->rss_indirection_tbl[i] = indir[i];
4619
4620         /* Update the hardware */
4621         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4622 }
4623
4624 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4625 {
4626         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4627
4628         if (nfc->data & RXH_L4_B_2_3)
4629                 hash_sets |= HCLGE_D_PORT_BIT;
4630         else
4631                 hash_sets &= ~HCLGE_D_PORT_BIT;
4632
4633         if (nfc->data & RXH_IP_SRC)
4634                 hash_sets |= HCLGE_S_IP_BIT;
4635         else
4636                 hash_sets &= ~HCLGE_S_IP_BIT;
4637
4638         if (nfc->data & RXH_IP_DST)
4639                 hash_sets |= HCLGE_D_IP_BIT;
4640         else
4641                 hash_sets &= ~HCLGE_D_IP_BIT;
4642
4643         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4644                 hash_sets |= HCLGE_V_TAG_BIT;
4645
4646         return hash_sets;
4647 }
4648
4649 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4650                                     struct ethtool_rxnfc *nfc,
4651                                     struct hclge_rss_input_tuple_cmd *req)
4652 {
4653         struct hclge_dev *hdev = vport->back;
4654         u8 tuple_sets;
4655
4656         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4657         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4658         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4659         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4660         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4661         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4662         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4663         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4664
4665         tuple_sets = hclge_get_rss_hash_bits(nfc);
4666         switch (nfc->flow_type) {
4667         case TCP_V4_FLOW:
4668                 req->ipv4_tcp_en = tuple_sets;
4669                 break;
4670         case TCP_V6_FLOW:
4671                 req->ipv6_tcp_en = tuple_sets;
4672                 break;
4673         case UDP_V4_FLOW:
4674                 req->ipv4_udp_en = tuple_sets;
4675                 break;
4676         case UDP_V6_FLOW:
4677                 req->ipv6_udp_en = tuple_sets;
4678                 break;
4679         case SCTP_V4_FLOW:
4680                 req->ipv4_sctp_en = tuple_sets;
4681                 break;
4682         case SCTP_V6_FLOW:
4683                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4684                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4685                         return -EINVAL;
4686
4687                 req->ipv6_sctp_en = tuple_sets;
4688                 break;
4689         case IPV4_FLOW:
4690                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4691                 break;
4692         case IPV6_FLOW:
4693                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4694                 break;
4695         default:
4696                 return -EINVAL;
4697         }
4698
4699         return 0;
4700 }
4701
4702 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4703                                struct ethtool_rxnfc *nfc)
4704 {
4705         struct hclge_vport *vport = hclge_get_vport(handle);
4706         struct hclge_dev *hdev = vport->back;
4707         struct hclge_rss_input_tuple_cmd *req;
4708         struct hclge_desc desc;
4709         int ret;
4710
4711         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4712                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4713                 return -EINVAL;
4714
4715         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4716         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4717
4718         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4719         if (ret) {
4720                 dev_err(&hdev->pdev->dev,
4721                         "failed to init rss tuple cmd, ret = %d\n", ret);
4722                 return ret;
4723         }
4724
4725         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4726         if (ret) {
4727                 dev_err(&hdev->pdev->dev,
4728                         "Set rss tuple fail, status = %d\n", ret);
4729                 return ret;
4730         }
4731
4732         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4733         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4734         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4735         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4736         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4737         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4738         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4739         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4740         hclge_get_rss_type(vport);
4741         return 0;
4742 }
4743
4744 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4745                                      u8 *tuple_sets)
4746 {
4747         switch (flow_type) {
4748         case TCP_V4_FLOW:
4749                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4750                 break;
4751         case UDP_V4_FLOW:
4752                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4753                 break;
4754         case TCP_V6_FLOW:
4755                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4756                 break;
4757         case UDP_V6_FLOW:
4758                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4759                 break;
4760         case SCTP_V4_FLOW:
4761                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4762                 break;
4763         case SCTP_V6_FLOW:
4764                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4765                 break;
4766         case IPV4_FLOW:
4767         case IPV6_FLOW:
4768                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4769                 break;
4770         default:
4771                 return -EINVAL;
4772         }
4773
4774         return 0;
4775 }
4776
4777 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4778 {
4779         u64 tuple_data = 0;
4780
4781         if (tuple_sets & HCLGE_D_PORT_BIT)
4782                 tuple_data |= RXH_L4_B_2_3;
4783         if (tuple_sets & HCLGE_S_PORT_BIT)
4784                 tuple_data |= RXH_L4_B_0_1;
4785         if (tuple_sets & HCLGE_D_IP_BIT)
4786                 tuple_data |= RXH_IP_DST;
4787         if (tuple_sets & HCLGE_S_IP_BIT)
4788                 tuple_data |= RXH_IP_SRC;
4789
4790         return tuple_data;
4791 }
4792
4793 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4794                                struct ethtool_rxnfc *nfc)
4795 {
4796         struct hclge_vport *vport = hclge_get_vport(handle);
4797         u8 tuple_sets;
4798         int ret;
4799
4800         nfc->data = 0;
4801
4802         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4803         if (ret || !tuple_sets)
4804                 return ret;
4805
4806         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4807
4808         return 0;
4809 }
4810
4811 static int hclge_get_tc_size(struct hnae3_handle *handle)
4812 {
4813         struct hclge_vport *vport = hclge_get_vport(handle);
4814         struct hclge_dev *hdev = vport->back;
4815
4816         return hdev->pf_rss_size_max;
4817 }
4818
4819 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4820 {
4821         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4822         struct hclge_vport *vport = hdev->vport;
4823         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4824         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4825         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4826         struct hnae3_tc_info *tc_info;
4827         u16 roundup_size;
4828         u16 rss_size;
4829         int i;
4830
4831         tc_info = &vport->nic.kinfo.tc_info;
4832         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4833                 rss_size = tc_info->tqp_count[i];
4834                 tc_valid[i] = 0;
4835
4836                 if (!(hdev->hw_tc_map & BIT(i)))
4837                         continue;
4838
4839                 /* tc_size set to hardware is the log2 of rss_size rounded up
4840                  * to a power of two; the actual queue size is limited by the
4841                  * indirection table.
4842                  */
4843                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4844                     rss_size == 0) {
4845                         dev_err(&hdev->pdev->dev,
4846                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4847                                 rss_size);
4848                         return -EINVAL;
4849                 }
4850
4851                 roundup_size = roundup_pow_of_two(rss_size);
4852                 roundup_size = ilog2(roundup_size);
4853
4854                 tc_valid[i] = 1;
4855                 tc_size[i] = roundup_size;
4856                 tc_offset[i] = tc_info->tqp_offset[i];
4857         }
4858
4859         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4860 }
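/* Illustrative sketch only, not driver code: the tc_size value programmed
 * by hclge_init_rss_tc_mode() is the log2 of rss_size rounded up to a power
 * of two, e.g. a TC with 10 queues is programmed as size 16 (tc_size = 4),
 * and the unused slots are constrained by the indirection table.
 */
#if 0
static unsigned int ex_rss_tc_size(unsigned int rss_size)
{
	unsigned int roundup = 1;
	unsigned int log2 = 0;

	while (roundup < rss_size) {
		roundup <<= 1;
		log2++;
	}

	return log2;	/* equals ilog2(roundup_pow_of_two(rss_size)) */
}
#endif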
4861
4862 int hclge_rss_init_hw(struct hclge_dev *hdev)
4863 {
4864         struct hclge_vport *vport = hdev->vport;
4865         u16 *rss_indir = vport[0].rss_indirection_tbl;
4866         u8 *key = vport[0].rss_hash_key;
4867         u8 hfunc = vport[0].rss_algo;
4868         int ret;
4869
4870         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4871         if (ret)
4872                 return ret;
4873
4874         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4875         if (ret)
4876                 return ret;
4877
4878         ret = hclge_set_rss_input_tuple(hdev);
4879         if (ret)
4880                 return ret;
4881
4882         return hclge_init_rss_tc_mode(hdev);
4883 }
4884
4885 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4886 {
4887         struct hclge_vport *vport = hdev->vport;
4888         int i, j;
4889
4890         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4891                 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4892                         vport[j].rss_indirection_tbl[i] =
4893                                 i % vport[j].alloc_rss_size;
4894         }
4895 }
4896
4897 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4898 {
4899         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4900         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4901         struct hclge_vport *vport = hdev->vport;
4902
4903         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4904                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4905
4906         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4907                 u16 *rss_ind_tbl;
4908
4909                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4910                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4911                 vport[i].rss_tuple_sets.ipv4_udp_en =
4912                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4913                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4914                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4915                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4916                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4917                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4918                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4919                 vport[i].rss_tuple_sets.ipv6_udp_en =
4920                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4921                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4922                         hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4923                         HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4924                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4925                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4926                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4927
4928                 vport[i].rss_algo = rss_algo;
4929
4930                 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4931                                            sizeof(*rss_ind_tbl), GFP_KERNEL);
4932                 if (!rss_ind_tbl)
4933                         return -ENOMEM;
4934
4935                 vport[i].rss_indirection_tbl = rss_ind_tbl;
4936                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4937                        HCLGE_RSS_KEY_SIZE);
4938         }
4939
4940         hclge_rss_indir_init_cfg(hdev);
4941
4942         return 0;
4943 }
4944
4945 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4946                                 int vector_id, bool en,
4947                                 struct hnae3_ring_chain_node *ring_chain)
4948 {
4949         struct hclge_dev *hdev = vport->back;
4950         struct hnae3_ring_chain_node *node;
4951         struct hclge_desc desc;
4952         struct hclge_ctrl_vector_chain_cmd *req =
4953                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4954         enum hclge_cmd_status status;
4955         enum hclge_opcode_type op;
4956         u16 tqp_type_and_id;
4957         int i;
4958
4959         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4960         hclge_cmd_setup_basic_desc(&desc, op, false);
4961         req->int_vector_id_l = hnae3_get_field(vector_id,
4962                                                HCLGE_VECTOR_ID_L_M,
4963                                                HCLGE_VECTOR_ID_L_S);
4964         req->int_vector_id_h = hnae3_get_field(vector_id,
4965                                                HCLGE_VECTOR_ID_H_M,
4966                                                HCLGE_VECTOR_ID_H_S);
4967
4968         i = 0;
4969         for (node = ring_chain; node; node = node->next) {
4970                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4971                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4972                                 HCLGE_INT_TYPE_S,
4973                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4974                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4975                                 HCLGE_TQP_ID_S, node->tqp_index);
4976                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4977                                 HCLGE_INT_GL_IDX_S,
4978                                 hnae3_get_field(node->int_gl_idx,
4979                                                 HNAE3_RING_GL_IDX_M,
4980                                                 HNAE3_RING_GL_IDX_S));
4981                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4982                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4983                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4984                         req->vfid = vport->vport_id;
4985
4986                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4987                         if (status) {
4988                                 dev_err(&hdev->pdev->dev,
4989                                         "Map TQP fail, status is %d.\n",
4990                                         status);
4991                                 return -EIO;
4992                         }
4993                         i = 0;
4994
4995                         hclge_cmd_setup_basic_desc(&desc,
4996                                                    op,
4997                                                    false);
4998                         req->int_vector_id_l =
4999                                 hnae3_get_field(vector_id,
5000                                                 HCLGE_VECTOR_ID_L_M,
5001                                                 HCLGE_VECTOR_ID_L_S);
5002                         req->int_vector_id_h =
5003                                 hnae3_get_field(vector_id,
5004                                                 HCLGE_VECTOR_ID_H_M,
5005                                                 HCLGE_VECTOR_ID_H_S);
5006                 }
5007         }
5008
5009         if (i > 0) {
5010                 req->int_cause_num = i;
5011                 req->vfid = vport->vport_id;
5012                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5013                 if (status) {
5014                         dev_err(&hdev->pdev->dev,
5015                                 "Map TQP fail, status is %d.\n", status);
5016                         return -EIO;
5017                 }
5018         }
5019
5020         return 0;
5021 }
5022
5023 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5024                                     struct hnae3_ring_chain_node *ring_chain)
5025 {
5026         struct hclge_vport *vport = hclge_get_vport(handle);
5027         struct hclge_dev *hdev = vport->back;
5028         int vector_id;
5029
5030         vector_id = hclge_get_vector_index(hdev, vector);
5031         if (vector_id < 0) {
5032                 dev_err(&hdev->pdev->dev,
5033                         "failed to get vector index. vector=%d\n", vector);
5034                 return vector_id;
5035         }
5036
5037         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5038 }
5039
5040 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5041                                        struct hnae3_ring_chain_node *ring_chain)
5042 {
5043         struct hclge_vport *vport = hclge_get_vport(handle);
5044         struct hclge_dev *hdev = vport->back;
5045         int vector_id, ret;
5046
5047         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5048                 return 0;
5049
5050         vector_id = hclge_get_vector_index(hdev, vector);
5051         if (vector_id < 0) {
5052                 dev_err(&handle->pdev->dev,
5053                         "Get vector index fail. ret =%d\n", vector_id);
5054                 return vector_id;
5055         }
5056
5057         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5058         if (ret)
5059                 dev_err(&handle->pdev->dev,
5060                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5061                         vector_id, ret);
5062
5063         return ret;
5064 }
5065
5066 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5067                                       bool en_uc, bool en_mc, bool en_bc)
5068 {
5069         struct hclge_vport *vport = &hdev->vport[vf_id];
5070         struct hnae3_handle *handle = &vport->nic;
5071         struct hclge_promisc_cfg_cmd *req;
5072         struct hclge_desc desc;
5073         bool uc_tx_en = en_uc;
5074         u8 promisc_cfg = 0;
5075         int ret;
5076
5077         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5078
5079         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5080         req->vf_id = vf_id;
5081
5082         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5083                 uc_tx_en = false;
5084
5085         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5086         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5087         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5088         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5089         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5090         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5091         req->extend_promisc = promisc_cfg;
5092
5093         /* to be compatible with DEVICE_VERSION_V1/2 */
5094         promisc_cfg = 0;
5095         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5096         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5097         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5098         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5099         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5100         req->promisc = promisc_cfg;
5101
5102         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5103         if (ret)
5104                 dev_err(&hdev->pdev->dev,
5105                         "failed to set vport %u promisc mode, ret = %d.\n",
5106                         vf_id, ret);
5107
5108         return ret;
5109 }
5110
5111 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5112                                  bool en_mc_pmc, bool en_bc_pmc)
5113 {
5114         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5115                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5116 }
5117
5118 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5119                                   bool en_mc_pmc)
5120 {
5121         struct hclge_vport *vport = hclge_get_vport(handle);
5122         struct hclge_dev *hdev = vport->back;
5123         bool en_bc_pmc = true;
5124
5125         /* For devices whose version is below V2, if broadcast promisc is
5126          * enabled, the vlan filter is always bypassed. So broadcast promisc
5127          * should be disabled until the user enables promisc mode.
5128          */
5129         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5130                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5131
5132         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5133                                             en_bc_pmc);
5134 }
5135
5136 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5137 {
5138         struct hclge_vport *vport = hclge_get_vport(handle);
5139         struct hclge_dev *hdev = vport->back;
5140
5141         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
5142 }
5143
5144 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5145 {
5146         struct hclge_get_fd_mode_cmd *req;
5147         struct hclge_desc desc;
5148         int ret;
5149
5150         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5151
5152         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5153
5154         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5155         if (ret) {
5156                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5157                 return ret;
5158         }
5159
5160         *fd_mode = req->mode;
5161
5162         return ret;
5163 }
5164
5165 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5166                                    u32 *stage1_entry_num,
5167                                    u32 *stage2_entry_num,
5168                                    u16 *stage1_counter_num,
5169                                    u16 *stage2_counter_num)
5170 {
5171         struct hclge_get_fd_allocation_cmd *req;
5172         struct hclge_desc desc;
5173         int ret;
5174
5175         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5176
5177         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5178
5179         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5180         if (ret) {
5181                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5182                         ret);
5183                 return ret;
5184         }
5185
5186         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5187         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5188         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5189         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5190
5191         return ret;
5192 }
5193
5194 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5195                                    enum HCLGE_FD_STAGE stage_num)
5196 {
5197         struct hclge_set_fd_key_config_cmd *req;
5198         struct hclge_fd_key_cfg *stage;
5199         struct hclge_desc desc;
5200         int ret;
5201
5202         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5203
5204         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5205         stage = &hdev->fd_cfg.key_cfg[stage_num];
5206         req->stage = stage_num;
5207         req->key_select = stage->key_sel;
5208         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5209         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5210         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5211         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5212         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5213         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5214
5215         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5216         if (ret)
5217                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5218
5219         return ret;
5220 }
5221
5222 static int hclge_init_fd_config(struct hclge_dev *hdev)
5223 {
5224 #define LOW_2_WORDS             0x03
5225         struct hclge_fd_key_cfg *key_cfg;
5226         int ret;
5227
5228         if (!hnae3_dev_fd_supported(hdev))
5229                 return 0;
5230
5231         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5232         if (ret)
5233                 return ret;
5234
5235         switch (hdev->fd_cfg.fd_mode) {
5236         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5237                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5238                 break;
5239         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5240                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5241                 break;
5242         default:
5243                 dev_err(&hdev->pdev->dev,
5244                         "Unsupported flow director mode %u\n",
5245                         hdev->fd_cfg.fd_mode);
5246                 return -EOPNOTSUPP;
5247         }
5248
5249         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5250         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5251         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5252         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5253         key_cfg->outer_sipv6_word_en = 0;
5254         key_cfg->outer_dipv6_word_en = 0;
5255
5256         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5257                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5258                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5259                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5260
5261         /* With the max 400-bit key, we can also support the src/dst mac tuples */
5262         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5263                 key_cfg->tuple_active |=
5264                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5265
5266         /* roce_type is used to filter roce frames
5267          * dst_vport is used to specify the rule
5268          */
5269         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5270
5271         ret = hclge_get_fd_allocation(hdev,
5272                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5273                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5274                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5275                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5276         if (ret)
5277                 return ret;
5278
5279         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5280 }
5281
5282 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5283                                 int loc, u8 *key, bool is_add)
5284 {
5285         struct hclge_fd_tcam_config_1_cmd *req1;
5286         struct hclge_fd_tcam_config_2_cmd *req2;
5287         struct hclge_fd_tcam_config_3_cmd *req3;
5288         struct hclge_desc desc[3];
5289         int ret;
5290
5291         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5292         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5293         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5294         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5295         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5296
5297         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5298         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5299         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5300
5301         req1->stage = stage;
5302         req1->xy_sel = sel_x ? 1 : 0;
5303         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5304         req1->index = cpu_to_le32(loc);
5305         req1->entry_vld = sel_x ? is_add : 0;
5306
5307         if (key) {
5308                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5309                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5310                        sizeof(req2->tcam_data));
5311                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5312                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5313         }
5314
5315         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5316         if (ret)
5317                 dev_err(&hdev->pdev->dev,
5318                         "config tcam key fail, ret=%d\n",
5319                         ret);
5320
5321         return ret;
5322 }
5323
5324 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5325                               struct hclge_fd_ad_data *action)
5326 {
5327         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5328         struct hclge_fd_ad_config_cmd *req;
5329         struct hclge_desc desc;
5330         u64 ad_data = 0;
5331         int ret;
5332
5333         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5334
5335         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5336         req->index = cpu_to_le32(loc);
5337         req->stage = stage;
5338
5339         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5340                       action->write_rule_id_to_bd);
5341         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5342                         action->rule_id);
5343         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5344                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5345                               action->override_tc);
5346                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5347                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5348         }
5349         ad_data <<= 32;
5350         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5351         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5352                       action->forward_to_direct_queue);
5353         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5354                         action->queue_id);
5355         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5356         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5357                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5358         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5359         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5360                         action->counter_id);
5361
5362         req->ad_data = cpu_to_le64(ad_data);
5363         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5364         if (ret)
5365                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5366
5367         return ret;
5368 }
5369
5370 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5371                                    struct hclge_fd_rule *rule)
5372 {
5373         u16 tmp_x_s, tmp_y_s;
5374         u32 tmp_x_l, tmp_y_l;
5375         int i;
5376
5377         if (rule->unused_tuple & tuple_bit)
5378                 return true;
5379
5380         switch (tuple_bit) {
5381         case BIT(INNER_DST_MAC):
5382                 for (i = 0; i < ETH_ALEN; i++) {
5383                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5384                                rule->tuples_mask.dst_mac[i]);
5385                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5386                                rule->tuples_mask.dst_mac[i]);
5387                 }
5388
5389                 return true;
5390         case BIT(INNER_SRC_MAC):
5391                 for (i = 0; i < ETH_ALEN; i++) {
5392                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5393                                rule->tuples_mask.src_mac[i]);
5394                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5395                                rule->tuples_mask.src_mac[i]);
5396                 }
5397
5398                 return true;
5399         case BIT(INNER_VLAN_TAG_FST):
5400                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5401                        rule->tuples_mask.vlan_tag1);
5402                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5403                        rule->tuples_mask.vlan_tag1);
5404                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5405                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5406
5407                 return true;
5408         case BIT(INNER_ETH_TYPE):
5409                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5410                        rule->tuples_mask.ether_proto);
5411                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5412                        rule->tuples_mask.ether_proto);
5413                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5414                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5415
5416                 return true;
5417         case BIT(INNER_IP_TOS):
5418                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5419                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5420
5421                 return true;
5422         case BIT(INNER_IP_PROTO):
5423                 calc_x(*key_x, rule->tuples.ip_proto,
5424                        rule->tuples_mask.ip_proto);
5425                 calc_y(*key_y, rule->tuples.ip_proto,
5426                        rule->tuples_mask.ip_proto);
5427
5428                 return true;
5429         case BIT(INNER_SRC_IP):
5430                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5431                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5432                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5433                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5434                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5435                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5436
5437                 return true;
5438         case BIT(INNER_DST_IP):
5439                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5440                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5441                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5442                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5443                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5444                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5445
5446                 return true;
5447         case BIT(INNER_SRC_PORT):
5448                 calc_x(tmp_x_s, rule->tuples.src_port,
5449                        rule->tuples_mask.src_port);
5450                 calc_y(tmp_y_s, rule->tuples.src_port,
5451                        rule->tuples_mask.src_port);
5452                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5453                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5454
5455                 return true;
5456         case BIT(INNER_DST_PORT):
5457                 calc_x(tmp_x_s, rule->tuples.dst_port,
5458                        rule->tuples_mask.dst_port);
5459                 calc_y(tmp_y_s, rule->tuples.dst_port,
5460                        rule->tuples_mask.dst_port);
5461                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5462                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5463
5464                 return true;
5465         default:
5466                 return false;
5467         }
5468 }
5469
5470 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5471                                  u8 vf_id, u8 network_port_id)
5472 {
5473         u32 port_number = 0;
5474
5475         if (port_type == HOST_PORT) {
5476                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5477                                 pf_id);
5478                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5479                                 vf_id);
5480                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5481         } else {
5482                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5483                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5484                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5485         }
5486
5487         return port_number;
5488 }
5489
5490 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5491                                        __le32 *key_x, __le32 *key_y,
5492                                        struct hclge_fd_rule *rule)
5493 {
5494         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5495         u8 cur_pos = 0, tuple_size, shift_bits;
5496         unsigned int i;
5497
5498         for (i = 0; i < MAX_META_DATA; i++) {
5499                 tuple_size = meta_data_key_info[i].key_length;
5500                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5501
5502                 switch (tuple_bit) {
5503                 case BIT(ROCE_TYPE):
5504                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5505                         cur_pos += tuple_size;
5506                         break;
5507                 case BIT(DST_VPORT):
5508                         port_number = hclge_get_port_number(HOST_PORT, 0,
5509                                                             rule->vf_id, 0);
5510                         hnae3_set_field(meta_data,
5511                                         GENMASK(cur_pos + tuple_size, cur_pos),
5512                                         cur_pos, port_number);
5513                         cur_pos += tuple_size;
5514                         break;
5515                 default:
5516                         break;
5517                 }
5518         }
5519
5520         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5521         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5522         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5523
5524         *key_x = cpu_to_le32(tmp_x << shift_bits);
5525         *key_y = cpu_to_le32(tmp_y << shift_bits);
5526 }
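/* The meta data word is assembled from bit 0 upwards and then left-aligned,
 * so its valid bits land in the MSB region of the key. As an illustration
 * (field widths here are hypothetical): if the active meta data fields
 * occupy cur_pos = 9 bits in total, then shift_bits = 32 - 9 = 23 and the
 * nine valid bits are shifted into bits 31..23 of *key_x / *key_y, with the
 * remaining low bits left as zero.
 */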
5527
5528 /* A complete key is the combination of the meta data key and the tuple key.
5529  * The meta data key is stored in the MSB region, the tuple key is stored in
5530  * the LSB region, and unused bits are filled with 0.
5531  */
5532 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5533                             struct hclge_fd_rule *rule)
5534 {
5535         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5536         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5537         u8 *cur_key_x, *cur_key_y;
5538         u8 meta_data_region;
5539         u8 tuple_size;
5540         int ret;
5541         u32 i;
5542
5543         memset(key_x, 0, sizeof(key_x));
5544         memset(key_y, 0, sizeof(key_y));
5545         cur_key_x = key_x;
5546         cur_key_y = key_y;
5547
5548         for (i = 0; i < MAX_TUPLE; i++) {
5549                 bool tuple_valid;
5550                 u32 check_tuple;
5551
5552                 tuple_size = tuple_key_info[i].key_length / 8;
5553                 check_tuple = key_cfg->tuple_active & BIT(i);
5554
5555                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5556                                                      cur_key_y, rule);
5557                 if (tuple_valid) {
5558                         cur_key_x += tuple_size;
5559                         cur_key_y += tuple_size;
5560                 }
5561         }
5562
5563         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5564                         MAX_META_DATA_LENGTH / 8;
5565
5566         hclge_fd_convert_meta_data(key_cfg,
5567                                    (__le32 *)(key_x + meta_data_region),
5568                                    (__le32 *)(key_y + meta_data_region),
5569                                    rule);
5570
5571         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5572                                    true);
5573         if (ret) {
5574                 dev_err(&hdev->pdev->dev,
5575                         "fd key_y config fail, loc=%u, ret=%d\n",
5576                         rule->location, ret);
5577                 return ret;
5578         }
5579
5580         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5581                                    true);
5582         if (ret)
5583                 dev_err(&hdev->pdev->dev,
5584                         "fd key_x config fail, loc=%u, ret=%d\n",
5585                         rule->location, ret);
5586         return ret;
5587 }
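/* Layout sketch of a complete TCAM key, assuming the 400-bit key mode and a
 * 32-bit meta data region (i.e. MAX_META_DATA_LENGTH of 32):
 *
 *	meta_data_region = 400 / 8 - 32 / 8 = 46
 *
 *	key byte:  0 ........................ 45 | 46 ............. 49
 *	content:   tuple key (LSB region)        | meta data (MSB region)
 *
 * Tuples that hclge_fd_convert_tuple() reports as invalid do not advance
 * cur_key_x/cur_key_y, so their bytes simply stay zero-filled.
 */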
5588
5589 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5590                                struct hclge_fd_rule *rule)
5591 {
5592         struct hclge_vport *vport = hdev->vport;
5593         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5594         struct hclge_fd_ad_data ad_data;
5595
5596         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5597         ad_data.ad_id = rule->location;
5598
5599         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5600                 ad_data.drop_packet = true;
5601         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5602                 ad_data.override_tc = true;
5603                 ad_data.queue_id =
5604                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5605                 ad_data.tc_size =
5606                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5607         } else {
5608                 ad_data.forward_to_direct_queue = true;
5609                 ad_data.queue_id = rule->queue_id;
5610         }
5611
5612         ad_data.use_counter = false;
5613         ad_data.counter_id = 0;
5614
5615         ad_data.use_next_stage = false;
5616         ad_data.next_input_key = 0;
5617
5618         ad_data.write_rule_id_to_bd = true;
5619         ad_data.rule_id = rule->location;
5620
5621         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5622 }
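/* Worked example for the HCLGE_FD_ACTION_SELECT_TC case: if the matched TC
 * owns 8 queues starting at tqp offset 16 (values are illustrative only),
 * the action data becomes
 *
 *	ad_data.override_tc = true;
 *	ad_data.queue_id = 16;		(first queue of the TC)
 *	ad_data.tc_size = ilog2(8);	(i.e. 3)
 *
 * A drop rule only sets drop_packet, and an ethtool queue rule sets
 * forward_to_direct_queue with the user supplied queue id.
 */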
5623
5624 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5625                                        u32 *unused_tuple)
5626 {
5627         if (!spec || !unused_tuple)
5628                 return -EINVAL;
5629
5630         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5631
5632         if (!spec->ip4src)
5633                 *unused_tuple |= BIT(INNER_SRC_IP);
5634
5635         if (!spec->ip4dst)
5636                 *unused_tuple |= BIT(INNER_DST_IP);
5637
5638         if (!spec->psrc)
5639                 *unused_tuple |= BIT(INNER_SRC_PORT);
5640
5641         if (!spec->pdst)
5642                 *unused_tuple |= BIT(INNER_DST_PORT);
5643
5644         if (!spec->tos)
5645                 *unused_tuple |= BIT(INNER_IP_TOS);
5646
5647         return 0;
5648 }
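/* For instance, a tcp4 rule that only specifies a destination port leaves
 * every other tuple unused, so this helper yields
 *
 *	*unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
 *			BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
 *			BIT(INNER_SRC_PORT) | BIT(INNER_IP_TOS);
 *
 * and only the destination port (plus the implicit ether type and ip
 * protocol tuples filled in by hclge_fd_get_tuple()) is matched.
 */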
5649
5650 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5651                                     u32 *unused_tuple)
5652 {
5653         if (!spec || !unused_tuple)
5654                 return -EINVAL;
5655
5656         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5657                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5658
5659         if (!spec->ip4src)
5660                 *unused_tuple |= BIT(INNER_SRC_IP);
5661
5662         if (!spec->ip4dst)
5663                 *unused_tuple |= BIT(INNER_DST_IP);
5664
5665         if (!spec->tos)
5666                 *unused_tuple |= BIT(INNER_IP_TOS);
5667
5668         if (!spec->proto)
5669                 *unused_tuple |= BIT(INNER_IP_PROTO);
5670
5671         if (spec->l4_4_bytes)
5672                 return -EOPNOTSUPP;
5673
5674         if (spec->ip_ver != ETH_RX_NFC_IP4)
5675                 return -EOPNOTSUPP;
5676
5677         return 0;
5678 }
5679
5680 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5681                                        u32 *unused_tuple)
5682 {
5683         if (!spec || !unused_tuple)
5684                 return -EINVAL;
5685
5686         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5687                 BIT(INNER_IP_TOS);
5688
5689         /* check whether src/dst ip address is used */
5690         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5691                 *unused_tuple |= BIT(INNER_SRC_IP);
5692
5693         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5694                 *unused_tuple |= BIT(INNER_DST_IP);
5695
5696         if (!spec->psrc)
5697                 *unused_tuple |= BIT(INNER_SRC_PORT);
5698
5699         if (!spec->pdst)
5700                 *unused_tuple |= BIT(INNER_DST_PORT);
5701
5702         if (spec->tclass)
5703                 return -EOPNOTSUPP;
5704
5705         return 0;
5706 }
5707
5708 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5709                                     u32 *unused_tuple)
5710 {
5711         if (!spec || !unused_tuple)
5712                 return -EINVAL;
5713
5714         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5715                 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5716
5717         /* check whether src/dst ip address is used */
5718         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5719                 *unused_tuple |= BIT(INNER_SRC_IP);
5720
5721         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5722                 *unused_tuple |= BIT(INNER_DST_IP);
5723
5724         if (!spec->l4_proto)
5725                 *unused_tuple |= BIT(INNER_IP_PROTO);
5726
5727         if (spec->tclass)
5728                 return -EOPNOTSUPP;
5729
5730         if (spec->l4_4_bytes)
5731                 return -EOPNOTSUPP;
5732
5733         return 0;
5734 }
5735
5736 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5737 {
5738         if (!spec || !unused_tuple)
5739                 return -EINVAL;
5740
5741         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5742                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5743                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5744
5745         if (is_zero_ether_addr(spec->h_source))
5746                 *unused_tuple |= BIT(INNER_SRC_MAC);
5747
5748         if (is_zero_ether_addr(spec->h_dest))
5749                 *unused_tuple |= BIT(INNER_DST_MAC);
5750
5751         if (!spec->h_proto)
5752                 *unused_tuple |= BIT(INNER_ETH_TYPE);
5753
5754         return 0;
5755 }
5756
5757 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5758                                     struct ethtool_rx_flow_spec *fs,
5759                                     u32 *unused_tuple)
5760 {
5761         if (fs->flow_type & FLOW_EXT) {
5762                 if (fs->h_ext.vlan_etype) {
5763                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5764                         return -EOPNOTSUPP;
5765                 }
5766
5767                 if (!fs->h_ext.vlan_tci)
5768                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5769
5770                 if (fs->m_ext.vlan_tci &&
5771                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5772                         dev_err(&hdev->pdev->dev,
5773                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5774                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5775                         return -EINVAL;
5776                 }
5777         } else {
5778                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5779         }
5780
5781         if (fs->flow_type & FLOW_MAC_EXT) {
5782                 if (hdev->fd_cfg.fd_mode !=
5783                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5784                         dev_err(&hdev->pdev->dev,
5785                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5786                         return -EOPNOTSUPP;
5787                 }
5788
5789                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5790                         *unused_tuple |= BIT(INNER_DST_MAC);
5791                 else
5792                         *unused_tuple &= ~BIT(INNER_DST_MAC);
5793         }
5794
5795         return 0;
5796 }
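/* FLOW_EXT corresponds to the ethtool "vlan"/"vlan-etype" extensions and
 * FLOW_MAC_EXT to "dst-mac". For example (device name and values are for
 * illustration only):
 *
 *	ethtool -U eth0 flow-type tcp4 vlan 100 dst-port 80 action 3 loc 2
 *
 * is accepted, while a rule that also sets "vlan-etype" is rejected with
 * -EOPNOTSUPP, and dst-mac matching is only accepted when the fd mode is
 * HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1.
 */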
5797
5798 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5799                                struct ethtool_rx_flow_spec *fs,
5800                                u32 *unused_tuple)
5801 {
5802         u32 flow_type;
5803         int ret;
5804
5805         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5806                 dev_err(&hdev->pdev->dev,
5807                         "failed to config fd rules, invalid rule location: %u, max is %u.\n",
5808                         fs->location,
5809                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5810                 return -EINVAL;
5811         }
5812
5813         if ((fs->flow_type & FLOW_EXT) &&
5814             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5815                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5816                 return -EOPNOTSUPP;
5817         }
5818
5819         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5820         switch (flow_type) {
5821         case SCTP_V4_FLOW:
5822         case TCP_V4_FLOW:
5823         case UDP_V4_FLOW:
5824                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5825                                                   unused_tuple);
5826                 break;
5827         case IP_USER_FLOW:
5828                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5829                                                unused_tuple);
5830                 break;
5831         case SCTP_V6_FLOW:
5832         case TCP_V6_FLOW:
5833         case UDP_V6_FLOW:
5834                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5835                                                   unused_tuple);
5836                 break;
5837         case IPV6_USER_FLOW:
5838                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5839                                                unused_tuple);
5840                 break;
5841         case ETHER_FLOW:
5842                 if (hdev->fd_cfg.fd_mode !=
5843                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5844                         dev_err(&hdev->pdev->dev,
5845                                 "ETHER_FLOW is not supported in current fd mode!\n");
5846                         return -EOPNOTSUPP;
5847                 }
5848
5849                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5850                                                  unused_tuple);
5851                 break;
5852         default:
5853                 dev_err(&hdev->pdev->dev,
5854                         "unsupported protocol type, protocol type = %#x\n",
5855                         flow_type);
5856                 return -EOPNOTSUPP;
5857         }
5858
5859         if (ret) {
5860                 dev_err(&hdev->pdev->dev,
5861                         "failed to check flow union tuple, ret = %d\n",
5862                         ret);
5863                 return ret;
5864         }
5865
5866         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5867 }
5868
5869 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5870 {
5871         struct hclge_fd_rule *rule = NULL;
5872         struct hlist_node *node2;
5873
5874         spin_lock_bh(&hdev->fd_rule_lock);
5875         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5876                 if (rule->location >= location)
5877                         break;
5878         }
5879
5880         spin_unlock_bh(&hdev->fd_rule_lock);
5881
5882         return rule && rule->location == location;
5883 }
5884
5885 /* must be called with fd_rule_lock held */
5886 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5887                                      struct hclge_fd_rule *new_rule,
5888                                      u16 location,
5889                                      bool is_add)
5890 {
5891         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5892         struct hlist_node *node2;
5893
5894         if (is_add && !new_rule)
5895                 return -EINVAL;
5896
5897         hlist_for_each_entry_safe(rule, node2,
5898                                   &hdev->fd_rule_list, rule_node) {
5899                 if (rule->location >= location)
5900                         break;
5901                 parent = rule;
5902         }
5903
5904         if (rule && rule->location == location) {
5905                 hlist_del(&rule->rule_node);
5906                 kfree(rule);
5907                 hdev->hclge_fd_rule_num--;
5908
5909                 if (!is_add) {
5910                         if (!hdev->hclge_fd_rule_num)
5911                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5912                         clear_bit(location, hdev->fd_bmap);
5913
5914                         return 0;
5915                 }
5916         } else if (!is_add) {
5917                 dev_err(&hdev->pdev->dev,
5918                         "failed to delete, rule %u does not exist\n",
5919                         location);
5920                 return -EINVAL;
5921         }
5922
5923         INIT_HLIST_NODE(&new_rule->rule_node);
5924
5925         if (parent)
5926                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5927         else
5928                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5929
5930         set_bit(location, hdev->fd_bmap);
5931         hdev->hclge_fd_rule_num++;
5932         hdev->fd_active_type = new_rule->rule_type;
5933
5934         return 0;
5935 }
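/* The rule list is kept sorted by location. A sketch of the update
 * semantics, starting from a list holding locations { 1, 5, 9 }:
 *
 *	add location 5 -> the old entry 5 is freed and replaced in place
 *	add location 7 -> inserted behind 5, list becomes { 1, 5, 7, 9 }
 *	del location 7 -> entry freed and bit 7 cleared in fd_bmap
 *	del location 8 -> -EINVAL, the rule does not exist
 *
 * hclge_fd_rule_num and fd_bmap track every change; fd_active_type is set
 * on add and reset to HCLGE_FD_RULE_NONE once the last rule is removed.
 */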
5936
5937 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5938                               struct ethtool_rx_flow_spec *fs,
5939                               struct hclge_fd_rule *rule)
5940 {
5941         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5942
5943         switch (flow_type) {
5944         case SCTP_V4_FLOW:
5945         case TCP_V4_FLOW:
5946         case UDP_V4_FLOW:
5947                 rule->tuples.src_ip[IPV4_INDEX] =
5948                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5949                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5950                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5951
5952                 rule->tuples.dst_ip[IPV4_INDEX] =
5953                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5954                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5955                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5956
5957                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5958                 rule->tuples_mask.src_port =
5959                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5960
5961                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5962                 rule->tuples_mask.dst_port =
5963                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5964
5965                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5966                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5967
5968                 rule->tuples.ether_proto = ETH_P_IP;
5969                 rule->tuples_mask.ether_proto = 0xFFFF;
5970
5971                 break;
5972         case IP_USER_FLOW:
5973                 rule->tuples.src_ip[IPV4_INDEX] =
5974                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5975                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5976                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5977
5978                 rule->tuples.dst_ip[IPV4_INDEX] =
5979                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5980                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5981                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5982
5983                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5984                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5985
5986                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5987                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5988
5989                 rule->tuples.ether_proto = ETH_P_IP;
5990                 rule->tuples_mask.ether_proto = 0xFFFF;
5991
5992                 break;
5993         case SCTP_V6_FLOW:
5994         case TCP_V6_FLOW:
5995         case UDP_V6_FLOW:
5996                 be32_to_cpu_array(rule->tuples.src_ip,
5997                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5998                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5999                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
6000
6001                 be32_to_cpu_array(rule->tuples.dst_ip,
6002                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6003                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6004                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
6005
6006                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6007                 rule->tuples_mask.src_port =
6008                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6009
6010                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6011                 rule->tuples_mask.dst_port =
6012                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6013
6014                 rule->tuples.ether_proto = ETH_P_IPV6;
6015                 rule->tuples_mask.ether_proto = 0xFFFF;
6016
6017                 break;
6018         case IPV6_USER_FLOW:
6019                 be32_to_cpu_array(rule->tuples.src_ip,
6020                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6021                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6022                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
6023
6024                 be32_to_cpu_array(rule->tuples.dst_ip,
6025                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6026                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6027                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
6028
6029                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6030                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6031
6032                 rule->tuples.ether_proto = ETH_P_IPV6;
6033                 rule->tuples_mask.ether_proto = 0xFFFF;
6034
6035                 break;
6036         case ETHER_FLOW:
6037                 ether_addr_copy(rule->tuples.src_mac,
6038                                 fs->h_u.ether_spec.h_source);
6039                 ether_addr_copy(rule->tuples_mask.src_mac,
6040                                 fs->m_u.ether_spec.h_source);
6041
6042                 ether_addr_copy(rule->tuples.dst_mac,
6043                                 fs->h_u.ether_spec.h_dest);
6044                 ether_addr_copy(rule->tuples_mask.dst_mac,
6045                                 fs->m_u.ether_spec.h_dest);
6046
6047                 rule->tuples.ether_proto =
6048                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
6049                 rule->tuples_mask.ether_proto =
6050                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
6051
6052                 break;
6053         default:
6054                 return -EOPNOTSUPP;
6055         }
6056
6057         switch (flow_type) {
6058         case SCTP_V4_FLOW:
6059         case SCTP_V6_FLOW:
6060                 rule->tuples.ip_proto = IPPROTO_SCTP;
6061                 rule->tuples_mask.ip_proto = 0xFF;
6062                 break;
6063         case TCP_V4_FLOW:
6064         case TCP_V6_FLOW:
6065                 rule->tuples.ip_proto = IPPROTO_TCP;
6066                 rule->tuples_mask.ip_proto = 0xFF;
6067                 break;
6068         case UDP_V4_FLOW:
6069         case UDP_V6_FLOW:
6070                 rule->tuples.ip_proto = IPPROTO_UDP;
6071                 rule->tuples_mask.ip_proto = 0xFF;
6072                 break;
6073         default:
6074                 break;
6075         }
6076
6077         if (fs->flow_type & FLOW_EXT) {
6078                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6079                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6080         }
6081
6082         if (fs->flow_type & FLOW_MAC_EXT) {
6083                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6084                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6085         }
6086
6087         return 0;
6088 }
6089
6090 /* must be called with fd_rule_lock held */
6091 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6092                                 struct hclge_fd_rule *rule)
6093 {
6094         int ret;
6095
6096         if (!rule) {
6097                 dev_err(&hdev->pdev->dev,
6098                         "The flow director rule is NULL\n");
6099                 return -EINVAL;
6100         }
6101
6102         /* it will never fail here, so there is no need to check the return value */
6103         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
6104
6105         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6106         if (ret)
6107                 goto clear_rule;
6108
6109         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6110         if (ret)
6111                 goto clear_rule;
6112
6113         return 0;
6114
6115 clear_rule:
6116         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
6117         return ret;
6118 }
6119
6120 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6121 {
6122         struct hclge_vport *vport = hclge_get_vport(handle);
6123         struct hclge_dev *hdev = vport->back;
6124
6125         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6126 }
6127
6128 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6129                               struct ethtool_rxnfc *cmd)
6130 {
6131         struct hclge_vport *vport = hclge_get_vport(handle);
6132         struct hclge_dev *hdev = vport->back;
6133         u16 dst_vport_id = 0, q_index = 0;
6134         struct ethtool_rx_flow_spec *fs;
6135         struct hclge_fd_rule *rule;
6136         u32 unused = 0;
6137         u8 action;
6138         int ret;
6139
6140         if (!hnae3_dev_fd_supported(hdev)) {
6141                 dev_err(&hdev->pdev->dev,
6142                         "flow director is not supported\n");
6143                 return -EOPNOTSUPP;
6144         }
6145
6146         if (!hdev->fd_en) {
6147                 dev_err(&hdev->pdev->dev,
6148                         "please enable flow director first\n");
6149                 return -EOPNOTSUPP;
6150         }
6151
6152         if (hclge_is_cls_flower_active(handle)) {
6153                 dev_err(&hdev->pdev->dev,
6154                         "please delete all existing cls flower rules first\n");
6155                 return -EINVAL;
6156         }
6157
6158         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6159
6160         ret = hclge_fd_check_spec(hdev, fs, &unused);
6161         if (ret)
6162                 return ret;
6163
6164         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
6165                 action = HCLGE_FD_ACTION_DROP_PACKET;
6166         } else {
6167                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
6168                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
6169                 u16 tqps;
6170
6171                 if (vf > hdev->num_req_vfs) {
6172                         dev_err(&hdev->pdev->dev,
6173                                 "Error: vf id (%u) > max vf num (%u)\n",
6174                                 vf, hdev->num_req_vfs);
6175                         return -EINVAL;
6176                 }
6177
6178                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6179                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6180
6181                 if (ring >= tqps) {
6182                         dev_err(&hdev->pdev->dev,
6183                                 "Error: queue id (%u) > max queue id (%u)\n",
6184                                 ring, tqps - 1);
6185                         return -EINVAL;
6186                 }
6187
6188                 action = HCLGE_FD_ACTION_SELECT_QUEUE;
6189                 q_index = ring;
6190         }
6191
6192         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6193         if (!rule)
6194                 return -ENOMEM;
6195
6196         ret = hclge_fd_get_tuple(hdev, fs, rule);
6197         if (ret) {
6198                 kfree(rule);
6199                 return ret;
6200         }
6201
6202         rule->flow_type = fs->flow_type;
6203         rule->location = fs->location;
6204         rule->unused_tuple = unused;
6205         rule->vf_id = dst_vport_id;
6206         rule->queue_id = q_index;
6207         rule->action = action;
6208         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6209
6210         /* to avoid rule conflicts, when the user configures a rule via ethtool,
6211          * we need to clear all arfs rules
6212          */
6213         spin_lock_bh(&hdev->fd_rule_lock);
6214         hclge_clear_arfs_rules(handle);
6215
6216         ret = hclge_fd_config_rule(hdev, rule);
6217
6218         spin_unlock_bh(&hdev->fd_rule_lock);
6219
6220         return ret;
6221 }
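/* Typical ethtool usage that reaches this entry point (interface name,
 * addresses and locations are examples only):
 *
 *	# steer matching packets to queue 2 of the PF
 *	ethtool -U eth0 flow-type udp4 dst-ip 192.168.1.100 dst-port 4789 \
 *		action 2 loc 10
 *	# drop matching packets
 *	ethtool -U eth0 flow-type tcp4 src-ip 10.0.0.1 action -1 loc 11
 *
 * A ring_cookie of RX_CLS_FLOW_DISC (action -1) becomes a drop rule; any
 * other value is split into a VF index and a queue index, both validated
 * against the requested VF count and the chosen vport's allocated tqps.
 */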
6222
6223 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6224                               struct ethtool_rxnfc *cmd)
6225 {
6226         struct hclge_vport *vport = hclge_get_vport(handle);
6227         struct hclge_dev *hdev = vport->back;
6228         struct ethtool_rx_flow_spec *fs;
6229         int ret;
6230
6231         if (!hnae3_dev_fd_supported(hdev))
6232                 return -EOPNOTSUPP;
6233
6234         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6235
6236         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6237                 return -EINVAL;
6238
6239         if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6240             !hclge_fd_rule_exist(hdev, fs->location)) {
6241                 dev_err(&hdev->pdev->dev,
6242                         "failed to delete, rule %u does not exist\n", fs->location);
6243                 return -ENOENT;
6244         }
6245
6246         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6247                                    NULL, false);
6248         if (ret)
6249                 return ret;
6250
6251         spin_lock_bh(&hdev->fd_rule_lock);
6252         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6253
6254         spin_unlock_bh(&hdev->fd_rule_lock);
6255
6256         return ret;
6257 }
6258
6259 /* must be called with fd_rule_lock held */
6260 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6261                                      bool clear_list)
6262 {
6263         struct hclge_vport *vport = hclge_get_vport(handle);
6264         struct hclge_dev *hdev = vport->back;
6265         struct hclge_fd_rule *rule;
6266         struct hlist_node *node;
6267         u16 location;
6268
6269         if (!hnae3_dev_fd_supported(hdev))
6270                 return;
6271
6272         for_each_set_bit(location, hdev->fd_bmap,
6273                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6274                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6275                                      NULL, false);
6276
6277         if (clear_list) {
6278                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6279                                           rule_node) {
6280                         hlist_del(&rule->rule_node);
6281                         kfree(rule);
6282                 }
6283                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6284                 hdev->hclge_fd_rule_num = 0;
6285                 bitmap_zero(hdev->fd_bmap,
6286                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6287         }
6288 }
6289
6290 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6291 {
6292         struct hclge_vport *vport = hclge_get_vport(handle);
6293         struct hclge_dev *hdev = vport->back;
6294         struct hclge_fd_rule *rule;
6295         struct hlist_node *node;
6296         int ret;
6297
6298         /* Return ok here, because reset error handling will check this
6299          * return value. If error is returned here, the reset process will
6300          * fail.
6301          */
6302         if (!hnae3_dev_fd_supported(hdev))
6303                 return 0;
6304
6305         /* if fd is disabled, the rules should not be restored during reset */
6306         if (!hdev->fd_en)
6307                 return 0;
6308
6309         spin_lock_bh(&hdev->fd_rule_lock);
6310         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6311                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6312                 if (!ret)
6313                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6314
6315                 if (ret) {
6316                         dev_warn(&hdev->pdev->dev,
6317                                  "Restore rule %u failed, remove it\n",
6318                                  rule->location);
6319                         clear_bit(rule->location, hdev->fd_bmap);
6320                         hlist_del(&rule->rule_node);
6321                         kfree(rule);
6322                         hdev->hclge_fd_rule_num--;
6323                 }
6324         }
6325
6326         if (hdev->hclge_fd_rule_num)
6327                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6328
6329         spin_unlock_bh(&hdev->fd_rule_lock);
6330
6331         return 0;
6332 }
6333
6334 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6335                                  struct ethtool_rxnfc *cmd)
6336 {
6337         struct hclge_vport *vport = hclge_get_vport(handle);
6338         struct hclge_dev *hdev = vport->back;
6339
6340         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6341                 return -EOPNOTSUPP;
6342
6343         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6344         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6345
6346         return 0;
6347 }
6348
6349 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6350                                      struct ethtool_tcpip4_spec *spec,
6351                                      struct ethtool_tcpip4_spec *spec_mask)
6352 {
6353         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6354         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6355                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6356
6357         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6358         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6359                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6360
6361         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6362         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6363                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6364
6365         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6366         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6367                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6368
6369         spec->tos = rule->tuples.ip_tos;
6370         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6371                         0 : rule->tuples_mask.ip_tos;
6372 }
6373
6374 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6375                                   struct ethtool_usrip4_spec *spec,
6376                                   struct ethtool_usrip4_spec *spec_mask)
6377 {
6378         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6379         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6380                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6381
6382         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6383         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6384                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6385
6386         spec->tos = rule->tuples.ip_tos;
6387         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6388                         0 : rule->tuples_mask.ip_tos;
6389
6390         spec->proto = rule->tuples.ip_proto;
6391         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6392                         0 : rule->tuples_mask.ip_proto;
6393
6394         spec->ip_ver = ETH_RX_NFC_IP4;
6395 }
6396
6397 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6398                                      struct ethtool_tcpip6_spec *spec,
6399                                      struct ethtool_tcpip6_spec *spec_mask)
6400 {
6401         cpu_to_be32_array(spec->ip6src,
6402                           rule->tuples.src_ip, IPV6_SIZE);
6403         cpu_to_be32_array(spec->ip6dst,
6404                           rule->tuples.dst_ip, IPV6_SIZE);
6405         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6406                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6407         else
6408                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6409                                   IPV6_SIZE);
6410
6411         if (rule->unused_tuple & BIT(INNER_DST_IP))
6412                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6413         else
6414                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6415                                   IPV6_SIZE);
6416
6417         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6418         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6419                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6420
6421         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6422         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6423                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6424 }
6425
6426 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6427                                   struct ethtool_usrip6_spec *spec,
6428                                   struct ethtool_usrip6_spec *spec_mask)
6429 {
6430         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6431         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6432         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6433                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6434         else
6435                 cpu_to_be32_array(spec_mask->ip6src,
6436                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6437
6438         if (rule->unused_tuple & BIT(INNER_DST_IP))
6439                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6440         else
6441                 cpu_to_be32_array(spec_mask->ip6dst,
6442                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6443
6444         spec->l4_proto = rule->tuples.ip_proto;
6445         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6446                         0 : rule->tuples_mask.ip_proto;
6447 }
6448
6449 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6450                                     struct ethhdr *spec,
6451                                     struct ethhdr *spec_mask)
6452 {
6453         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6454         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6455
6456         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6457                 eth_zero_addr(spec_mask->h_source);
6458         else
6459                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6460
6461         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6462                 eth_zero_addr(spec_mask->h_dest);
6463         else
6464                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6465
6466         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6467         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6468                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6469 }
6470
6471 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6472                                   struct hclge_fd_rule *rule)
6473 {
6474         if (fs->flow_type & FLOW_EXT) {
6475                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6476                 fs->m_ext.vlan_tci =
6477                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6478                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6479         }
6480
6481         if (fs->flow_type & FLOW_MAC_EXT) {
6482                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6483                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6484                         eth_zero_addr(fs->m_ext.h_dest);
6485                 else
6486                         ether_addr_copy(fs->m_ext.h_dest,
6487                                         rule->tuples_mask.dst_mac);
6488         }
6489 }
6490
6491 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6492                                   struct ethtool_rxnfc *cmd)
6493 {
6494         struct hclge_vport *vport = hclge_get_vport(handle);
6495         struct hclge_fd_rule *rule = NULL;
6496         struct hclge_dev *hdev = vport->back;
6497         struct ethtool_rx_flow_spec *fs;
6498         struct hlist_node *node2;
6499
6500         if (!hnae3_dev_fd_supported(hdev))
6501                 return -EOPNOTSUPP;
6502
6503         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6504
6505         spin_lock_bh(&hdev->fd_rule_lock);
6506
6507         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6508                 if (rule->location >= fs->location)
6509                         break;
6510         }
6511
6512         if (!rule || fs->location != rule->location) {
6513                 spin_unlock_bh(&hdev->fd_rule_lock);
6514
6515                 return -ENOENT;
6516         }
6517
6518         fs->flow_type = rule->flow_type;
6519         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6520         case SCTP_V4_FLOW:
6521         case TCP_V4_FLOW:
6522         case UDP_V4_FLOW:
6523                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6524                                          &fs->m_u.tcp_ip4_spec);
6525                 break;
6526         case IP_USER_FLOW:
6527                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6528                                       &fs->m_u.usr_ip4_spec);
6529                 break;
6530         case SCTP_V6_FLOW:
6531         case TCP_V6_FLOW:
6532         case UDP_V6_FLOW:
6533                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6534                                          &fs->m_u.tcp_ip6_spec);
6535                 break;
6536         case IPV6_USER_FLOW:
6537                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6538                                       &fs->m_u.usr_ip6_spec);
6539                 break;
6540         /* The flow type of the fd rule has been checked before it was added
6541          * to the rule list. As the other flow types have been handled, it must
6542          * be ETHER_FLOW for the default case.
6543          */
6544         default:
6545                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6546                                         &fs->m_u.ether_spec);
6547                 break;
6548         }
6549
6550         hclge_fd_get_ext_info(fs, rule);
6551
6552         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6553                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6554         } else {
6555                 u64 vf_id;
6556
6557                 fs->ring_cookie = rule->queue_id;
6558                 vf_id = rule->vf_id;
6559                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6560                 fs->ring_cookie |= vf_id;
6561         }
6562
6563         spin_unlock_bh(&hdev->fd_rule_lock);
6564
6565         return 0;
6566 }
6567
6568 static int hclge_get_all_rules(struct hnae3_handle *handle,
6569                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6570 {
6571         struct hclge_vport *vport = hclge_get_vport(handle);
6572         struct hclge_dev *hdev = vport->back;
6573         struct hclge_fd_rule *rule;
6574         struct hlist_node *node2;
6575         int cnt = 0;
6576
6577         if (!hnae3_dev_fd_supported(hdev))
6578                 return -EOPNOTSUPP;
6579
6580         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6581
6582         spin_lock_bh(&hdev->fd_rule_lock);
6583         hlist_for_each_entry_safe(rule, node2,
6584                                   &hdev->fd_rule_list, rule_node) {
6585                 if (cnt == cmd->rule_cnt) {
6586                         spin_unlock_bh(&hdev->fd_rule_lock);
6587                         return -EMSGSIZE;
6588                 }
6589
6590                 rule_locs[cnt] = rule->location;
6591                 cnt++;
6592         }
6593
6594         spin_unlock_bh(&hdev->fd_rule_lock);
6595
6596         cmd->rule_cnt = cnt;
6597
6598         return 0;
6599 }
6600
6601 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6602                                      struct hclge_fd_rule_tuples *tuples)
6603 {
6604 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6605 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6606
6607         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6608         tuples->ip_proto = fkeys->basic.ip_proto;
6609         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6610
6611         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6612                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6613                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6614         } else {
6615                 int i;
6616
6617                 for (i = 0; i < IPV6_SIZE; i++) {
6618                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6619                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6620                 }
6621         }
6622 }
6623
6624 /* traverse all rules, check whether an existing rule has the same tuples */
6625 static struct hclge_fd_rule *
6626 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6627                           const struct hclge_fd_rule_tuples *tuples)
6628 {
6629         struct hclge_fd_rule *rule = NULL;
6630         struct hlist_node *node;
6631
6632         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6633                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6634                         return rule;
6635         }
6636
6637         return NULL;
6638 }
6639
6640 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6641                                      struct hclge_fd_rule *rule)
6642 {
6643         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6644                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6645                              BIT(INNER_SRC_PORT);
6646         rule->action = 0;
6647         rule->vf_id = 0;
6648         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6649         if (tuples->ether_proto == ETH_P_IP) {
6650                 if (tuples->ip_proto == IPPROTO_TCP)
6651                         rule->flow_type = TCP_V4_FLOW;
6652                 else
6653                         rule->flow_type = UDP_V4_FLOW;
6654         } else {
6655                 if (tuples->ip_proto == IPPROTO_TCP)
6656                         rule->flow_type = TCP_V6_FLOW;
6657                 else
6658                         rule->flow_type = UDP_V6_FLOW;
6659         }
6660         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6661         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6662 }
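/* Example of an aRFS-generated rule: a dissected IPv4/TCP flow towards
 * destination port 443 (values illustrative) produces
 *
 *	rule->flow_type    = TCP_V4_FLOW;
 *	rule->tuples_mask  = all ones (full match on the copied tuples);
 *	rule->unused_tuple = SRC_MAC | DST_MAC | VLAN_TAG_FST | IP_TOS |
 *			     SRC_PORT;
 *
 * so, unlike ethtool rules, aRFS matches on protocol, addresses and the
 * destination port only; hclge_fd_get_flow_tuples() does not record the
 * source port.
 */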
6663
6664 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6665                                       u16 flow_id, struct flow_keys *fkeys)
6666 {
6667         struct hclge_vport *vport = hclge_get_vport(handle);
6668         struct hclge_fd_rule_tuples new_tuples = {};
6669         struct hclge_dev *hdev = vport->back;
6670         struct hclge_fd_rule *rule;
6671         u16 tmp_queue_id;
6672         u16 bit_id;
6673         int ret;
6674
6675         if (!hnae3_dev_fd_supported(hdev))
6676                 return -EOPNOTSUPP;
6677
6678         /* when there are already fd rules added by the user,
6679          * arfs should not work
6680          */
6681         spin_lock_bh(&hdev->fd_rule_lock);
6682         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6683             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6684                 spin_unlock_bh(&hdev->fd_rule_lock);
6685                 return -EOPNOTSUPP;
6686         }
6687
6688         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6689
6690         /* check whether a flow director filter already exists for this flow:
6691          * if not, create a new filter for it;
6692          * if a filter exists with a different queue id, modify the filter;
6693          * if a filter exists with the same queue id, do nothing
6694          */
6695         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6696         if (!rule) {
6697                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6698                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6699                         spin_unlock_bh(&hdev->fd_rule_lock);
6700                         return -ENOSPC;
6701                 }
6702
6703                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6704                 if (!rule) {
6705                         spin_unlock_bh(&hdev->fd_rule_lock);
6706                         return -ENOMEM;
6707                 }
6708
6709                 set_bit(bit_id, hdev->fd_bmap);
6710                 rule->location = bit_id;
6711                 rule->arfs.flow_id = flow_id;
6712                 rule->queue_id = queue_id;
6713                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6714                 ret = hclge_fd_config_rule(hdev, rule);
6715
6716                 spin_unlock_bh(&hdev->fd_rule_lock);
6717
6718                 if (ret)
6719                         return ret;
6720
6721                 return rule->location;
6722         }
6723
6724         spin_unlock_bh(&hdev->fd_rule_lock);
6725
6726         if (rule->queue_id == queue_id)
6727                 return rule->location;
6728
6729         tmp_queue_id = rule->queue_id;
6730         rule->queue_id = queue_id;
6731         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6732         if (ret) {
6733                 rule->queue_id = tmp_queue_id;
6734                 return ret;
6735         }
6736
6737         return rule->location;
6738 }
6739
6740 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6741 {
6742 #ifdef CONFIG_RFS_ACCEL
6743         struct hnae3_handle *handle = &hdev->vport[0].nic;
6744         struct hclge_fd_rule *rule;
6745         struct hlist_node *node;
6746         HLIST_HEAD(del_list);
6747
6748         spin_lock_bh(&hdev->fd_rule_lock);
6749         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6750                 spin_unlock_bh(&hdev->fd_rule_lock);
6751                 return;
6752         }
6753         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6754                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6755                                         rule->arfs.flow_id, rule->location)) {
6756                         hlist_del_init(&rule->rule_node);
6757                         hlist_add_head(&rule->rule_node, &del_list);
6758                         hdev->hclge_fd_rule_num--;
6759                         clear_bit(rule->location, hdev->fd_bmap);
6760                 }
6761         }
6762         spin_unlock_bh(&hdev->fd_rule_lock);
6763
6764         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6765                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6766                                      rule->location, NULL, false);
6767                 kfree(rule);
6768         }
6769 #endif
6770 }
6771
6772 /* must be called with fd_rule_lock held */
6773 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6774 {
6775 #ifdef CONFIG_RFS_ACCEL
6776         struct hclge_vport *vport = hclge_get_vport(handle);
6777         struct hclge_dev *hdev = vport->back;
6778
6779         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6780                 hclge_del_all_fd_entries(handle, true);
6781 #endif
6782 }
6783
6784 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6785                                     struct hclge_fd_rule *rule)
6786 {
6787         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6788                 struct flow_match_basic match;
6789                 u16 ethtype_key, ethtype_mask;
6790
6791                 flow_rule_match_basic(flow, &match);
6792                 ethtype_key = ntohs(match.key->n_proto);
6793                 ethtype_mask = ntohs(match.mask->n_proto);
6794
6795                 if (ethtype_key == ETH_P_ALL) {
6796                         ethtype_key = 0;
6797                         ethtype_mask = 0;
6798                 }
6799                 rule->tuples.ether_proto = ethtype_key;
6800                 rule->tuples_mask.ether_proto = ethtype_mask;
6801                 rule->tuples.ip_proto = match.key->ip_proto;
6802                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
6803         } else {
6804                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
6805                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6806         }
6807 }
6808
6809 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6810                                   struct hclge_fd_rule *rule)
6811 {
6812         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6813                 struct flow_match_eth_addrs match;
6814
6815                 flow_rule_match_eth_addrs(flow, &match);
6816                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6817                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6818                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
6819                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6820         } else {
6821                 rule->unused_tuple |= BIT(INNER_DST_MAC);
6822                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
6823         }
6824 }
6825
6826 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6827                                    struct hclge_fd_rule *rule)
6828 {
6829         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6830                 struct flow_match_vlan match;
6831
6832                 flow_rule_match_vlan(flow, &match);
6833                 rule->tuples.vlan_tag1 = match.key->vlan_id |
6834                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
6835                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6836                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6837         } else {
6838                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6839         }
6840 }
6841
6842 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6843                                  struct hclge_fd_rule *rule)
6844 {
6845         u16 addr_type = 0;
6846
6847         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6848                 struct flow_match_control match;
6849
6850                 flow_rule_match_control(flow, &match);
6851                 addr_type = match.key->addr_type;
6852         }
6853
6854         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6855                 struct flow_match_ipv4_addrs match;
6856
6857                 flow_rule_match_ipv4_addrs(flow, &match);
6858                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6859                 rule->tuples_mask.src_ip[IPV4_INDEX] =
6860                                                 be32_to_cpu(match.mask->src);
6861                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6862                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6863                                                 be32_to_cpu(match.mask->dst);
6864         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6865                 struct flow_match_ipv6_addrs match;
6866
6867                 flow_rule_match_ipv6_addrs(flow, &match);
6868                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6869                                   IPV6_SIZE);
6870                 be32_to_cpu_array(rule->tuples_mask.src_ip,
6871                                   match.mask->src.s6_addr32, IPV6_SIZE);
6872                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6873                                   IPV6_SIZE);
6874                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
6875                                   match.mask->dst.s6_addr32, IPV6_SIZE);
6876         } else {
6877                 rule->unused_tuple |= BIT(INNER_SRC_IP);
6878                 rule->unused_tuple |= BIT(INNER_DST_IP);
6879         }
6880 }
6881
6882 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6883                                    struct hclge_fd_rule *rule)
6884 {
6885         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6886                 struct flow_match_ports match;
6887
6888                 flow_rule_match_ports(flow, &match);
6889
6890                 rule->tuples.src_port = be16_to_cpu(match.key->src);
6891                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6892                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6893                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6894         } else {
6895                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
6896                 rule->unused_tuple |= BIT(INNER_DST_PORT);
6897         }
6898 }
6899
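/* Translate a tc flower classifier into an hclge_fd_rule. Only the
 * dissector keys whitelisted below (control, basic, eth addrs, vlan,
 * IPv4/IPv6 addrs and L4 ports) are accepted; the helpers above copy
 * each matched key/mask pair into rule->tuples / rule->tuples_mask,
 * and any key the classifier did not supply is flagged in
 * rule->unused_tuple.
 */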
6900 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6901                                   struct flow_cls_offload *cls_flower,
6902                                   struct hclge_fd_rule *rule)
6903 {
6904         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6905         struct flow_dissector *dissector = flow->match.dissector;
6906
6907         if (dissector->used_keys &
6908             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6909               BIT(FLOW_DISSECTOR_KEY_BASIC) |
6910               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6911               BIT(FLOW_DISSECTOR_KEY_VLAN) |
6912               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6913               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6914               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6915                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6916                         dissector->used_keys);
6917                 return -EOPNOTSUPP;
6918         }
6919
6920         hclge_get_cls_key_basic(flow, rule);
6921         hclge_get_cls_key_mac(flow, rule);
6922         hclge_get_cls_key_vlan(flow, rule);
6923         hclge_get_cls_key_ip(flow, rule);
6924         hclge_get_cls_key_port(flow, rule);
6925
6926         return 0;
6927 }
6928
6929 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6930                                   struct flow_cls_offload *cls_flower, int tc)
6931 {
6932         u32 prio = cls_flower->common.prio;
6933
6934         if (tc < 0 || tc > hdev->tc_max) {
6935                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6936                 return -EINVAL;
6937         }
6938
6939         if (prio == 0 ||
6940             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6941                 dev_err(&hdev->pdev->dev,
6942                         "prio %u should be in range[1, %u]\n",
6943                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6944                 return -EINVAL;
6945         }
6946
6947         if (test_bit(prio - 1, hdev->fd_bmap)) {
6948                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6949                 return -EINVAL;
6950         }
6951         return 0;
6952 }
6953
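/* Add a tc flower rule offloaded through the cls_flower path. The rule's
 * 1-based tc prio is reused as the stage-1 flow director location
 * (prio - 1), so flower rules cannot coexist with ethtool -N (EP) rules,
 * which are rejected at the top of this function. As a rough illustration
 * only (exact iproute2 syntax may vary by version), a matching request
 * could originate from something like:
 *   tc filter add dev eth0 ingress prio 1 flower dst_ip 192.0.2.1 \
 *      skip_sw hw_tc 2
 * which would arrive here with tc = 2 and cls_flower->common.prio = 1.
 */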
6954 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6955                                 struct flow_cls_offload *cls_flower,
6956                                 int tc)
6957 {
6958         struct hclge_vport *vport = hclge_get_vport(handle);
6959         struct hclge_dev *hdev = vport->back;
6960         struct hclge_fd_rule *rule;
6961         int ret;
6962
6963         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6964                 dev_err(&hdev->pdev->dev,
6965                         "please remove all existing fd rules via ethtool first\n");
6966                 return -EINVAL;
6967         }
6968
6969         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6970         if (ret) {
6971                 dev_err(&hdev->pdev->dev,
6972                         "failed to check cls flower params, ret = %d\n", ret);
6973                 return ret;
6974         }
6975
6976         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6977         if (!rule)
6978                 return -ENOMEM;
6979
6980         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6981         if (ret)
6982                 goto err;
6983
6984         rule->action = HCLGE_FD_ACTION_SELECT_TC;
6985         rule->cls_flower.tc = tc;
6986         rule->location = cls_flower->common.prio - 1;
6987         rule->vf_id = 0;
6988         rule->cls_flower.cookie = cls_flower->cookie;
6989         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6990
6991         spin_lock_bh(&hdev->fd_rule_lock);
6992         hclge_clear_arfs_rules(handle);
6993
6994         ret = hclge_fd_config_rule(hdev, rule);
6995
6996         spin_unlock_bh(&hdev->fd_rule_lock);
6997
6998         if (ret) {
6999                 dev_err(&hdev->pdev->dev,
7000                         "failed to add cls flower rule, ret = %d\n", ret);
7001                 goto err;
7002         }
7003
7004         return 0;
7005 err:
7006         kfree(rule);
7007         return ret;
7008 }
7009
7010 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7011                                                    unsigned long cookie)
7012 {
7013         struct hclge_fd_rule *rule;
7014         struct hlist_node *node;
7015
7016         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7017                 if (rule->cls_flower.cookie == cookie)
7018                         return rule;
7019         }
7020
7021         return NULL;
7022 }
7023
7024 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7025                                 struct flow_cls_offload *cls_flower)
7026 {
7027         struct hclge_vport *vport = hclge_get_vport(handle);
7028         struct hclge_dev *hdev = vport->back;
7029         struct hclge_fd_rule *rule;
7030         int ret;
7031
7032         spin_lock_bh(&hdev->fd_rule_lock);
7033
7034         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7035         if (!rule) {
7036                 spin_unlock_bh(&hdev->fd_rule_lock);
7037                 return -EINVAL;
7038         }
7039
7040         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7041                                    NULL, false);
7042         if (ret) {
7043                 dev_err(&hdev->pdev->dev,
7044                         "failed to delete cls flower rule %u, ret = %d\n",
7045                         rule->location, ret);
7046                 spin_unlock_bh(&hdev->fd_rule_lock);
7047                 return ret;
7048         }
7049
7050         ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
7051         if (ret) {
7052                 dev_err(&hdev->pdev->dev,
7053                         "failed to delete cls flower rule %u in list, ret = %d\n",
7054                         rule->location, ret);
7055                 spin_unlock_bh(&hdev->fd_rule_lock);
7056                 return ret;
7057         }
7058
7059         spin_unlock_bh(&hdev->fd_rule_lock);
7060
7061         return 0;
7062 }
7063
7064 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7065 {
7066         struct hclge_vport *vport = hclge_get_vport(handle);
7067         struct hclge_dev *hdev = vport->back;
7068
7069         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7070                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7071 }
7072
7073 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7074 {
7075         struct hclge_vport *vport = hclge_get_vport(handle);
7076         struct hclge_dev *hdev = vport->back;
7077
7078         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7079 }
7080
7081 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7082 {
7083         struct hclge_vport *vport = hclge_get_vport(handle);
7084         struct hclge_dev *hdev = vport->back;
7085
7086         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7087 }
7088
7089 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7090 {
7091         struct hclge_vport *vport = hclge_get_vport(handle);
7092         struct hclge_dev *hdev = vport->back;
7093
7094         return hdev->rst_stats.hw_reset_done_cnt;
7095 }
7096
7097 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7098 {
7099         struct hclge_vport *vport = hclge_get_vport(handle);
7100         struct hclge_dev *hdev = vport->back;
7101         bool clear;
7102
7103         hdev->fd_en = enable;
7104         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7105
7106         if (!enable) {
7107                 spin_lock_bh(&hdev->fd_rule_lock);
7108                 hclge_del_all_fd_entries(handle, clear);
7109                 spin_unlock_bh(&hdev->fd_rule_lock);
7110         } else {
7111                 hclge_restore_fd_entries(handle);
7112         }
7113 }
7114
7115 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7116 {
7117         struct hclge_desc desc;
7118         struct hclge_config_mac_mode_cmd *req =
7119                 (struct hclge_config_mac_mode_cmd *)desc.data;
7120         u32 loop_en = 0;
7121         int ret;
7122
7123         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7124
7125         if (enable) {
7126                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7127                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7128                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7129                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7130                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7131                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7132                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7133                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7134                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7135                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7136         }
7137
7138         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7139
7140         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7141         if (ret)
7142                 dev_err(&hdev->pdev->dev,
7143                         "mac enable fail, ret =%d.\n", ret);
7144 }
7145
7146 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7147                                      u8 switch_param, u8 param_mask)
7148 {
7149         struct hclge_mac_vlan_switch_cmd *req;
7150         struct hclge_desc desc;
7151         u32 func_id;
7152         int ret;
7153
7154         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7155         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7156
7157         /* read current config parameter */
7158         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7159                                    true);
7160         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7161         req->func_id = cpu_to_le32(func_id);
7162
7163         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7164         if (ret) {
7165                 dev_err(&hdev->pdev->dev,
7166                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7167                 return ret;
7168         }
7169
7170         /* modify and write new config parameter */
7171         hclge_cmd_reuse_desc(&desc, false);
7172         req->switch_param = (req->switch_param & param_mask) | switch_param;
7173         req->param_mask = param_mask;
7174
7175         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7176         if (ret)
7177                 dev_err(&hdev->pdev->dev,
7178                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7179         return ret;
7180 }
7181
7182 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7183                                        int link_ret)
7184 {
7185 #define HCLGE_PHY_LINK_STATUS_NUM  200
7186
7187         struct phy_device *phydev = hdev->hw.mac.phydev;
7188         int i = 0;
7189         int ret;
7190
7191         do {
7192                 ret = phy_read_status(phydev);
7193                 if (ret) {
7194                         dev_err(&hdev->pdev->dev,
7195                                 "phy update link status fail, ret = %d\n", ret);
7196                         return;
7197                 }
7198
7199                 if (phydev->link == link_ret)
7200                         break;
7201
7202                 msleep(HCLGE_LINK_STATUS_MS);
7203         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7204 }
7205
7206 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7207 {
7208 #define HCLGE_MAC_LINK_STATUS_NUM  100
7209
7210         int link_status;
7211         int i = 0;
7212         int ret;
7213
7214         do {
7215                 ret = hclge_get_mac_link_status(hdev, &link_status);
7216                 if (ret)
7217                         return ret;
7218                 if (link_status == link_ret)
7219                         return 0;
7220
7221                 msleep(HCLGE_LINK_STATUS_MS);
7222         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7223         return -EBUSY;
7224 }
7225
7226 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7227                                           bool is_phy)
7228 {
7229         int link_ret;
7230
7231         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7232
7233         if (is_phy)
7234                 hclge_phy_link_status_wait(hdev, link_ret);
7235
7236         return hclge_mac_link_status_wait(hdev, link_ret);
7237 }
7238
7239 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7240 {
7241         struct hclge_config_mac_mode_cmd *req;
7242         struct hclge_desc desc;
7243         u32 loop_en;
7244         int ret;
7245
7246         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7247         /* 1 Read out the MAC mode config at first */
7248         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7249         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7250         if (ret) {
7251                 dev_err(&hdev->pdev->dev,
7252                         "mac loopback get fail, ret =%d.\n", ret);
7253                 return ret;
7254         }
7255
7256         /* 2 Then setup the loopback flag */
7257         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7258         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7259
7260         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7261
7262         /* 3 Config mac work mode with loopback flag
7263          * and its original configuration parameters
7264          */
7265         hclge_cmd_reuse_desc(&desc, false);
7266         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7267         if (ret)
7268                 dev_err(&hdev->pdev->dev,
7269                         "mac loopback set fail, ret =%d.\n", ret);
7270         return ret;
7271 }
7272
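/* Program serial or parallel serdes loopback through firmware, then poll
 * for completion: the status is re-read every HCLGE_SERDES_RETRY_MS
 * (10 ms) for up to HCLGE_SERDES_RETRY_NUM (100) iterations, waiting for
 * the DONE bit and then checking the SUCCESS bit, i.e. roughly a one
 * second worst case before giving up with -EBUSY.
 */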
7273 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7274                                      enum hnae3_loop loop_mode)
7275 {
7276 #define HCLGE_SERDES_RETRY_MS   10
7277 #define HCLGE_SERDES_RETRY_NUM  100
7278
7279         struct hclge_serdes_lb_cmd *req;
7280         struct hclge_desc desc;
7281         int ret, i = 0;
7282         u8 loop_mode_b;
7283
7284         req = (struct hclge_serdes_lb_cmd *)desc.data;
7285         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7286
7287         switch (loop_mode) {
7288         case HNAE3_LOOP_SERIAL_SERDES:
7289                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7290                 break;
7291         case HNAE3_LOOP_PARALLEL_SERDES:
7292                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7293                 break;
7294         default:
7295                 dev_err(&hdev->pdev->dev,
7296                         "unsupported serdes loopback mode %d\n", loop_mode);
7297                 return -ENOTSUPP;
7298         }
7299
7300         if (en) {
7301                 req->enable = loop_mode_b;
7302                 req->mask = loop_mode_b;
7303         } else {
7304                 req->mask = loop_mode_b;
7305         }
7306
7307         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7308         if (ret) {
7309                 dev_err(&hdev->pdev->dev,
7310                         "serdes loopback set fail, ret = %d\n", ret);
7311                 return ret;
7312         }
7313
7314         do {
7315                 msleep(HCLGE_SERDES_RETRY_MS);
7316                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7317                                            true);
7318                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7319                 if (ret) {
7320                         dev_err(&hdev->pdev->dev,
7321                                 "serdes loopback get, ret = %d\n", ret);
7322                         return ret;
7323                 }
7324         } while (++i < HCLGE_SERDES_RETRY_NUM &&
7325                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
7326
7327         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7328                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7329                 return -EBUSY;
7330         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7331                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7332                 return -EIO;
7333         }
7334         return ret;
7335 }
7336
7337 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7338                                      enum hnae3_loop loop_mode)
7339 {
7340         int ret;
7341
7342         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7343         if (ret)
7344                 return ret;
7345
7346         hclge_cfg_mac_mode(hdev, en);
7347
7348         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7349         if (ret)
7350                 dev_err(&hdev->pdev->dev,
7351                         "serdes loopback config mac mode timeout\n");
7352
7353         return ret;
7354 }
7355
7356 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7357                                      struct phy_device *phydev)
7358 {
7359         int ret;
7360
7361         if (!phydev->suspended) {
7362                 ret = phy_suspend(phydev);
7363                 if (ret)
7364                         return ret;
7365         }
7366
7367         ret = phy_resume(phydev);
7368         if (ret)
7369                 return ret;
7370
7371         return phy_loopback(phydev, true);
7372 }
7373
7374 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7375                                       struct phy_device *phydev)
7376 {
7377         int ret;
7378
7379         ret = phy_loopback(phydev, false);
7380         if (ret)
7381                 return ret;
7382
7383         return phy_suspend(phydev);
7384 }
7385
7386 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7387 {
7388         struct phy_device *phydev = hdev->hw.mac.phydev;
7389         int ret;
7390
7391         if (!phydev)
7392                 return -ENOTSUPP;
7393
7394         if (en)
7395                 ret = hclge_enable_phy_loopback(hdev, phydev);
7396         else
7397                 ret = hclge_disable_phy_loopback(hdev, phydev);
7398         if (ret) {
7399                 dev_err(&hdev->pdev->dev,
7400                         "set phy loopback fail, ret = %d\n", ret);
7401                 return ret;
7402         }
7403
7404         hclge_cfg_mac_mode(hdev, en);
7405
7406         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7407         if (ret)
7408                 dev_err(&hdev->pdev->dev,
7409                         "phy loopback config mac mode timeout\n");
7410
7411         return ret;
7412 }
7413
7414 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7415                             int stream_id, bool enable)
7416 {
7417         struct hclge_desc desc;
7418         struct hclge_cfg_com_tqp_queue_cmd *req =
7419                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7420         int ret;
7421
7422         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7423         req->tqp_id = cpu_to_le16(tqp_id);
7424         req->stream_id = cpu_to_le16(stream_id);
7425         if (enable)
7426                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7427
7428         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7429         if (ret)
7430                 dev_err(&hdev->pdev->dev,
7431                         "Tqp enable fail, status =%d.\n", ret);
7432         return ret;
7433 }
7434
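/* Top-level loopback selector (used, for example, by the ethtool
 * self-test path): APP (MAC-internal), serial/parallel serdes, or PHY
 * loopback. On DEVICE_VERSION_V2 and later the SSU "allow loopback"
 * switch parameter is cleared when a loopback is enabled, so that frames
 * with SMAC == DMAC reach the MAC instead of being reflected inside the
 * SSU, and it is restored when the loopback is disabled. Finally, every
 * TQP of the vport is enabled or disabled to match the requested state.
 */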
7435 static int hclge_set_loopback(struct hnae3_handle *handle,
7436                               enum hnae3_loop loop_mode, bool en)
7437 {
7438         struct hclge_vport *vport = hclge_get_vport(handle);
7439         struct hnae3_knic_private_info *kinfo;
7440         struct hclge_dev *hdev = vport->back;
7441         int i, ret;
7442
7443         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7444          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7445          * the same, the packets are looped back in the SSU. If SSU loopback
7446          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7447          */
7448         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7449                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7450
7451                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7452                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
7453                 if (ret)
7454                         return ret;
7455         }
7456
7457         switch (loop_mode) {
7458         case HNAE3_LOOP_APP:
7459                 ret = hclge_set_app_loopback(hdev, en);
7460                 break;
7461         case HNAE3_LOOP_SERIAL_SERDES:
7462         case HNAE3_LOOP_PARALLEL_SERDES:
7463                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7464                 break;
7465         case HNAE3_LOOP_PHY:
7466                 ret = hclge_set_phy_loopback(hdev, en);
7467                 break;
7468         default:
7469                 ret = -ENOTSUPP;
7470                 dev_err(&hdev->pdev->dev,
7471                         "loop_mode %d is not supported\n", loop_mode);
7472                 break;
7473         }
7474
7475         if (ret)
7476                 return ret;
7477
7478         kinfo = &vport->nic.kinfo;
7479         for (i = 0; i < kinfo->num_tqps; i++) {
7480                 ret = hclge_tqp_enable(hdev, i, 0, en);
7481                 if (ret)
7482                         return ret;
7483         }
7484
7485         return 0;
7486 }
7487
7488 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7489 {
7490         int ret;
7491
7492         ret = hclge_set_app_loopback(hdev, false);
7493         if (ret)
7494                 return ret;
7495
7496         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7497         if (ret)
7498                 return ret;
7499
7500         return hclge_cfg_serdes_loopback(hdev, false,
7501                                          HNAE3_LOOP_PARALLEL_SERDES);
7502 }
7503
7504 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7505 {
7506         struct hclge_vport *vport = hclge_get_vport(handle);
7507         struct hnae3_knic_private_info *kinfo;
7508         struct hnae3_queue *queue;
7509         struct hclge_tqp *tqp;
7510         int i;
7511
7512         kinfo = &vport->nic.kinfo;
7513         for (i = 0; i < kinfo->num_tqps; i++) {
7514                 queue = handle->kinfo.tqp[i];
7515                 tqp = container_of(queue, struct hclge_tqp, q);
7516                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7517         }
7518 }
7519
7520 static void hclge_flush_link_update(struct hclge_dev *hdev)
7521 {
7522 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
7523
7524         unsigned long last = hdev->serv_processed_cnt;
7525         int i = 0;
7526
7527         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7528                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7529                last == hdev->serv_processed_cnt)
7530                 usleep_range(1, 1);
7531 }
7532
7533 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7534 {
7535         struct hclge_vport *vport = hclge_get_vport(handle);
7536         struct hclge_dev *hdev = vport->back;
7537
7538         if (enable) {
7539                 hclge_task_schedule(hdev, 0);
7540         } else {
7541                 /* Set the DOWN flag here to disable link updating */
7542                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7543
7544                 /* flush memory to make sure DOWN is seen by service task */
7545                 smp_mb__before_atomic();
7546                 hclge_flush_link_update(hdev);
7547         }
7548 }
7549
7550 static int hclge_ae_start(struct hnae3_handle *handle)
7551 {
7552         struct hclge_vport *vport = hclge_get_vport(handle);
7553         struct hclge_dev *hdev = vport->back;
7554
7555         /* mac enable */
7556         hclge_cfg_mac_mode(hdev, true);
7557         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7558         hdev->hw.mac.link = 0;
7559
7560         /* reset tqp stats */
7561         hclge_reset_tqp_stats(handle);
7562
7563         hclge_mac_start_phy(hdev);
7564
7565         return 0;
7566 }
7567
7568 static void hclge_ae_stop(struct hnae3_handle *handle)
7569 {
7570         struct hclge_vport *vport = hclge_get_vport(handle);
7571         struct hclge_dev *hdev = vport->back;
7572         int i;
7573
7574         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7575         spin_lock_bh(&hdev->fd_rule_lock);
7576         hclge_clear_arfs_rules(handle);
7577         spin_unlock_bh(&hdev->fd_rule_lock);
7578
7579         /* If it is not PF reset, the firmware will disable the MAC,
7580          * so we only need to stop the phy here.
7581          */
7582         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7583             hdev->reset_type != HNAE3_FUNC_RESET) {
7584                 hclge_mac_stop_phy(hdev);
7585                 hclge_update_link_status(hdev);
7586                 return;
7587         }
7588
7589         for (i = 0; i < handle->kinfo.num_tqps; i++)
7590                 hclge_reset_tqp(handle, i);
7591
7592         hclge_config_mac_tnl_int(hdev, false);
7593
7594         /* Mac disable */
7595         hclge_cfg_mac_mode(hdev, false);
7596
7597         hclge_mac_stop_phy(hdev);
7598
7599         /* reset tqp stats */
7600         hclge_reset_tqp_stats(handle);
7601         hclge_update_link_status(hdev);
7602 }
7603
7604 int hclge_vport_start(struct hclge_vport *vport)
7605 {
7606         struct hclge_dev *hdev = vport->back;
7607
7608         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7609         vport->last_active_jiffies = jiffies;
7610
7611         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7612                 if (vport->vport_id) {
7613                         hclge_restore_mac_table_common(vport);
7614                         hclge_restore_vport_vlan_table(vport);
7615                 } else {
7616                         hclge_restore_hw_table(hdev);
7617                 }
7618         }
7619
7620         clear_bit(vport->vport_id, hdev->vport_config_block);
7621
7622         return 0;
7623 }
7624
7625 void hclge_vport_stop(struct hclge_vport *vport)
7626 {
7627         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7628 }
7629
7630 static int hclge_client_start(struct hnae3_handle *handle)
7631 {
7632         struct hclge_vport *vport = hclge_get_vport(handle);
7633
7634         return hclge_vport_start(vport);
7635 }
7636
7637 static void hclge_client_stop(struct hnae3_handle *handle)
7638 {
7639         struct hclge_vport *vport = hclge_get_vport(handle);
7640
7641         hclge_vport_stop(vport);
7642 }
7643
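/* Convert the firmware completion status (cmdq_resp) and per-operation
 * response code of a MAC-VLAN table command into an errno. For ADD,
 * resp_code 0 or 1 means success and the UC/MC overflow codes map to
 * -ENOSPC; for REMOVE and LOOKUP, resp_code 1 means the entry was not
 * found (-ENOENT). Any other code is reported as -EIO.
 */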
7644 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7645                                          u16 cmdq_resp, u8  resp_code,
7646                                          enum hclge_mac_vlan_tbl_opcode op)
7647 {
7648         struct hclge_dev *hdev = vport->back;
7649
7650         if (cmdq_resp) {
7651                 dev_err(&hdev->pdev->dev,
7652                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7653                         cmdq_resp);
7654                 return -EIO;
7655         }
7656
7657         if (op == HCLGE_MAC_VLAN_ADD) {
7658                 if (!resp_code || resp_code == 1)
7659                         return 0;
7660                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7661                          resp_code == HCLGE_ADD_MC_OVERFLOW)
7662                         return -ENOSPC;
7663
7664                 dev_err(&hdev->pdev->dev,
7665                         "add mac addr failed for undefined, code=%u.\n",
7666                         resp_code);
7667                 return -EIO;
7668         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7669                 if (!resp_code) {
7670                         return 0;
7671                 } else if (resp_code == 1) {
7672                         dev_dbg(&hdev->pdev->dev,
7673                                 "remove mac addr failed for miss.\n");
7674                         return -ENOENT;
7675                 }
7676
7677                 dev_err(&hdev->pdev->dev,
7678                         "remove mac addr failed for undefined, code=%u.\n",
7679                         resp_code);
7680                 return -EIO;
7681         } else if (op == HCLGE_MAC_VLAN_LKUP) {
7682                 if (!resp_code) {
7683                         return 0;
7684                 } else if (resp_code == 1) {
7685                         dev_dbg(&hdev->pdev->dev,
7686                                 "lookup mac addr failed for miss.\n");
7687                         return -ENOENT;
7688                 }
7689
7690                 dev_err(&hdev->pdev->dev,
7691                         "lookup mac addr failed for undefined, code=%u.\n",
7692                         resp_code);
7693                 return -EIO;
7694         }
7695
7696         dev_err(&hdev->pdev->dev,
7697                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7698
7699         return -EINVAL;
7700 }
7701
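/* A multicast MAC-VLAN entry carries a function bitmap spread over the
 * data words of desc[1] (VF 0..191) and desc[2] (VF 192..255). This
 * helper sets or clears the bit for one vfid; for example, vfid 200 maps
 * to desc[2], word (200 - 192) / 32 = 0, bit 200 % 32 = 8.
 */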
7702 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7703 {
7704 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7705
7706         unsigned int word_num;
7707         unsigned int bit_num;
7708
7709         if (vfid > 255 || vfid < 0)
7710                 return -EIO;
7711
7712         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7713                 word_num = vfid / 32;
7714                 bit_num  = vfid % 32;
7715                 if (clr)
7716                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7717                 else
7718                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7719         } else {
7720                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7721                 bit_num  = vfid % 32;
7722                 if (clr)
7723                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7724                 else
7725                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7726         }
7727
7728         return 0;
7729 }
7730
7731 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7732 {
7733 #define HCLGE_DESC_NUMBER 3
7734 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7735         int i, j;
7736
7737         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7738                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7739                         if (desc[i].data[j])
7740                                 return false;
7741
7742         return true;
7743 }
7744
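/* Pack a MAC address into the hardware entry layout: bytes 0..3 form the
 * 32-bit high word and bytes 4..5 the 16-bit low word, both little
 * endian. For example, 00:11:22:33:44:55 becomes mac_addr_hi32 =
 * 0x33221100 and mac_addr_lo16 = 0x5544. Multicast entries additionally
 * set the multicast entry-type and enable bits.
 */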
7745 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7746                                    const u8 *addr, bool is_mc)
7747 {
7748         const unsigned char *mac_addr = addr;
7749         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7750                        (mac_addr[0]) | (mac_addr[1] << 8);
7751         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7752
7753         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7754         if (is_mc) {
7755                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7756                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7757         }
7758
7759         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7760         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7761 }
7762
7763 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7764                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7765 {
7766         struct hclge_dev *hdev = vport->back;
7767         struct hclge_desc desc;
7768         u8 resp_code;
7769         u16 retval;
7770         int ret;
7771
7772         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7773
7774         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7775
7776         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7777         if (ret) {
7778                 dev_err(&hdev->pdev->dev,
7779                         "del mac addr failed for cmd_send, ret =%d.\n",
7780                         ret);
7781                 return ret;
7782         }
7783         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7784         retval = le16_to_cpu(desc.retval);
7785
7786         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7787                                              HCLGE_MAC_VLAN_REMOVE);
7788 }
7789
7790 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7791                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7792                                      struct hclge_desc *desc,
7793                                      bool is_mc)
7794 {
7795         struct hclge_dev *hdev = vport->back;
7796         u8 resp_code;
7797         u16 retval;
7798         int ret;
7799
7800         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7801         if (is_mc) {
7802                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7803                 memcpy(desc[0].data,
7804                        req,
7805                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7806                 hclge_cmd_setup_basic_desc(&desc[1],
7807                                            HCLGE_OPC_MAC_VLAN_ADD,
7808                                            true);
7809                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7810                 hclge_cmd_setup_basic_desc(&desc[2],
7811                                            HCLGE_OPC_MAC_VLAN_ADD,
7812                                            true);
7813                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7814         } else {
7815                 memcpy(desc[0].data,
7816                        req,
7817                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7818                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7819         }
7820         if (ret) {
7821                 dev_err(&hdev->pdev->dev,
7822                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7823                         ret);
7824                 return ret;
7825         }
7826         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7827         retval = le16_to_cpu(desc[0].retval);
7828
7829         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7830                                              HCLGE_MAC_VLAN_LKUP);
7831 }
7832
7833 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7834                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7835                                   struct hclge_desc *mc_desc)
7836 {
7837         struct hclge_dev *hdev = vport->back;
7838         int cfg_status;
7839         u8 resp_code;
7840         u16 retval;
7841         int ret;
7842
7843         if (!mc_desc) {
7844                 struct hclge_desc desc;
7845
7846                 hclge_cmd_setup_basic_desc(&desc,
7847                                            HCLGE_OPC_MAC_VLAN_ADD,
7848                                            false);
7849                 memcpy(desc.data, req,
7850                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7851                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7852                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7853                 retval = le16_to_cpu(desc.retval);
7854
7855                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7856                                                            resp_code,
7857                                                            HCLGE_MAC_VLAN_ADD);
7858         } else {
7859                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7860                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7861                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7862                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7863                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7864                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7865                 memcpy(mc_desc[0].data, req,
7866                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7867                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7868                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7869                 retval = le16_to_cpu(mc_desc[0].retval);
7870
7871                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7872                                                            resp_code,
7873                                                            HCLGE_MAC_VLAN_ADD);
7874         }
7875
7876         if (ret) {
7877                 dev_err(&hdev->pdev->dev,
7878                         "add mac addr failed for cmd_send, ret =%d.\n",
7879                         ret);
7880                 return ret;
7881         }
7882
7883         return cfg_status;
7884 }
7885
7886 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7887                                u16 *allocated_size)
7888 {
7889         struct hclge_umv_spc_alc_cmd *req;
7890         struct hclge_desc desc;
7891         int ret;
7892
7893         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7894         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7895
7896         req->space_size = cpu_to_le32(space_size);
7897
7898         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7899         if (ret) {
7900                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7901                         ret);
7902                 return ret;
7903         }
7904
7905         *allocated_size = le32_to_cpu(desc.data[1]);
7906
7907         return 0;
7908 }
7909
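/* The unicast MAC-VLAN (UMV) space granted by firmware is split into a
 * private quota per function plus one shared pool: with N allocated
 * vports, priv_umv_size = allocated / (N + 1) and the shared pool gets
 * one private quota plus the remainder. For example, 256 granted entries
 * and 7 vports give each function a private quota of 32 and a shared
 * pool of 32.
 */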
7910 static int hclge_init_umv_space(struct hclge_dev *hdev)
7911 {
7912         u16 allocated_size = 0;
7913         int ret;
7914
7915         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7916         if (ret)
7917                 return ret;
7918
7919         if (allocated_size < hdev->wanted_umv_size)
7920                 dev_warn(&hdev->pdev->dev,
7921                          "failed to alloc umv space, want %u, get %u\n",
7922                          hdev->wanted_umv_size, allocated_size);
7923
7924         hdev->max_umv_size = allocated_size;
7925         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7926         hdev->share_umv_size = hdev->priv_umv_size +
7927                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7928
7929         return 0;
7930 }
7931
7932 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7933 {
7934         struct hclge_vport *vport;
7935         int i;
7936
7937         for (i = 0; i < hdev->num_alloc_vport; i++) {
7938                 vport = &hdev->vport[i];
7939                 vport->used_umv_num = 0;
7940         }
7941
7942         mutex_lock(&hdev->vport_lock);
7943         hdev->share_umv_size = hdev->priv_umv_size +
7944                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7945         mutex_unlock(&hdev->vport_lock);
7946 }
7947
7948 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7949 {
7950         struct hclge_dev *hdev = vport->back;
7951         bool is_full;
7952
7953         if (need_lock)
7954                 mutex_lock(&hdev->vport_lock);
7955
7956         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7957                    hdev->share_umv_size == 0);
7958
7959         if (need_lock)
7960                 mutex_unlock(&hdev->vport_lock);
7961
7962         return is_full;
7963 }
7964
7965 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7966 {
7967         struct hclge_dev *hdev = vport->back;
7968
7969         if (is_free) {
7970                 if (vport->used_umv_num > hdev->priv_umv_size)
7971                         hdev->share_umv_size++;
7972
7973                 if (vport->used_umv_num > 0)
7974                         vport->used_umv_num--;
7975         } else {
7976                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7977                     hdev->share_umv_size > 0)
7978                         hdev->share_umv_size--;
7979                 vport->used_umv_num++;
7980         }
7981 }
7982
7983 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7984                                                   const u8 *mac_addr)
7985 {
7986         struct hclge_mac_node *mac_node, *tmp;
7987
7988         list_for_each_entry_safe(mac_node, tmp, list, node)
7989                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7990                         return mac_node;
7991
7992         return NULL;
7993 }
7994
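/* State machine for entries in the software MAC lists: TO_ADD (still to
 * be written to hardware), ACTIVE (present in hardware) and TO_DEL
 * (still to be removed from hardware). A TO_ADD request on a TO_DEL node
 * revives it to ACTIVE, a TO_DEL request on a TO_ADD node simply frees
 * the node (it never reached hardware), and confirming ACTIVE only
 * promotes nodes that were TO_ADD.
 */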
7995 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7996                                   enum HCLGE_MAC_NODE_STATE state)
7997 {
7998         switch (state) {
7999         /* from set_rx_mode or tmp_add_list */
8000         case HCLGE_MAC_TO_ADD:
8001                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8002                         mac_node->state = HCLGE_MAC_ACTIVE;
8003                 break;
8004         /* only from set_rx_mode */
8005         case HCLGE_MAC_TO_DEL:
8006                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8007                         list_del(&mac_node->node);
8008                         kfree(mac_node);
8009                 } else {
8010                         mac_node->state = HCLGE_MAC_TO_DEL;
8011                 }
8012                 break;
8013         /* only from tmp_add_list, the mac_node->state won't be
8014          * ACTIVE.
8015          */
8016         case HCLGE_MAC_ACTIVE:
8017                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8018                         mac_node->state = HCLGE_MAC_ACTIVE;
8019
8020                 break;
8021         }
8022 }
8023
8024 int hclge_update_mac_list(struct hclge_vport *vport,
8025                           enum HCLGE_MAC_NODE_STATE state,
8026                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8027                           const unsigned char *addr)
8028 {
8029         struct hclge_dev *hdev = vport->back;
8030         struct hclge_mac_node *mac_node;
8031         struct list_head *list;
8032
8033         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8034                 &vport->uc_mac_list : &vport->mc_mac_list;
8035
8036         spin_lock_bh(&vport->mac_list_lock);
8037
8038         /* if the mac addr is already in the mac list, no need to add a new
8039          * one into it, just check the mac addr state, convert it to a
8040          * new state, or just remove it, or do nothing.
8041          */
8042         mac_node = hclge_find_mac_node(list, addr);
8043         if (mac_node) {
8044                 hclge_update_mac_node(mac_node, state);
8045                 spin_unlock_bh(&vport->mac_list_lock);
8046                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8047                 return 0;
8048         }
8049
8050         /* if this address was never added, there is no need to delete it */
8051         if (state == HCLGE_MAC_TO_DEL) {
8052                 spin_unlock_bh(&vport->mac_list_lock);
8053                 dev_err(&hdev->pdev->dev,
8054                         "failed to delete address %pM from mac list\n",
8055                         addr);
8056                 return -ENOENT;
8057         }
8058
8059         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8060         if (!mac_node) {
8061                 spin_unlock_bh(&vport->mac_list_lock);
8062                 return -ENOMEM;
8063         }
8064
8065         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8066
8067         mac_node->state = state;
8068         ether_addr_copy(mac_node->mac_addr, addr);
8069         list_add_tail(&mac_node->node, list);
8070
8071         spin_unlock_bh(&vport->mac_list_lock);
8072
8073         return 0;
8074 }
8075
8076 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8077                              const unsigned char *addr)
8078 {
8079         struct hclge_vport *vport = hclge_get_vport(handle);
8080
8081         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8082                                      addr);
8083 }
8084
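/* Synchronously program one unicast address for a vport. The address is
 * first looked up in the MAC-VLAN table: -ENOENT means the entry is
 * free, so it is added if the UMV space check passes and the
 * private/shared usage counters are updated; a successful lookup means
 * the address already exists and is treated as a no-op. When the UMV
 * space is exhausted, -ENOSPC is returned so the overflow can be
 * reflected in the vport's overflow promisc flags.
 */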
8085 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8086                              const unsigned char *addr)
8087 {
8088         struct hclge_dev *hdev = vport->back;
8089         struct hclge_mac_vlan_tbl_entry_cmd req;
8090         struct hclge_desc desc;
8091         u16 egress_port = 0;
8092         int ret;
8093
8094         /* mac addr check */
8095         if (is_zero_ether_addr(addr) ||
8096             is_broadcast_ether_addr(addr) ||
8097             is_multicast_ether_addr(addr)) {
8098                 dev_err(&hdev->pdev->dev,
8099                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8100                          addr, is_zero_ether_addr(addr),
8101                          is_broadcast_ether_addr(addr),
8102                          is_multicast_ether_addr(addr));
8103                 return -EINVAL;
8104         }
8105
8106         memset(&req, 0, sizeof(req));
8107
8108         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8109                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8110
8111         req.egress_port = cpu_to_le16(egress_port);
8112
8113         hclge_prepare_mac_addr(&req, addr, false);
8114
8115         /* Lookup the mac address in the mac_vlan table, and add
8116          * it if the entry is nonexistent. Duplicate unicast entries
8117          * are not allowed in the mac vlan table.
8118          */
8119         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8120         if (ret == -ENOENT) {
8121                 mutex_lock(&hdev->vport_lock);
8122                 if (!hclge_is_umv_space_full(vport, false)) {
8123                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8124                         if (!ret)
8125                                 hclge_update_umv_space(vport, false);
8126                         mutex_unlock(&hdev->vport_lock);
8127                         return ret;
8128                 }
8129                 mutex_unlock(&hdev->vport_lock);
8130
8131                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8132                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8133                                 hdev->priv_umv_size);
8134
8135                 return -ENOSPC;
8136         }
8137
8138         /* check if we just hit a duplicate entry */
8139         if (!ret) {
8140                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8141                          vport->vport_id, addr);
8142                 return 0;
8143         }
8144
8145         dev_err(&hdev->pdev->dev,
8146                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8147                 addr);
8148
8149         return ret;
8150 }
8151
8152 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8153                             const unsigned char *addr)
8154 {
8155         struct hclge_vport *vport = hclge_get_vport(handle);
8156
8157         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8158                                      addr);
8159 }
8160
8161 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8162                             const unsigned char *addr)
8163 {
8164         struct hclge_dev *hdev = vport->back;
8165         struct hclge_mac_vlan_tbl_entry_cmd req;
8166         int ret;
8167
8168         /* mac addr check */
8169         if (is_zero_ether_addr(addr) ||
8170             is_broadcast_ether_addr(addr) ||
8171             is_multicast_ether_addr(addr)) {
8172                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8173                         addr);
8174                 return -EINVAL;
8175         }
8176
8177         memset(&req, 0, sizeof(req));
8178         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8179         hclge_prepare_mac_addr(&req, addr, false);
8180         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8181         if (!ret) {
8182                 mutex_lock(&hdev->vport_lock);
8183                 hclge_update_umv_space(vport, true);
8184                 mutex_unlock(&hdev->vport_lock);
8185         } else if (ret == -ENOENT) {
8186                 ret = 0;
8187         }
8188
8189         return ret;
8190 }
8191
8192 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8193                              const unsigned char *addr)
8194 {
8195         struct hclge_vport *vport = hclge_get_vport(handle);
8196
8197         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8198                                      addr);
8199 }
8200
8201 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8202                              const unsigned char *addr)
8203 {
8204         struct hclge_dev *hdev = vport->back;
8205         struct hclge_mac_vlan_tbl_entry_cmd req;
8206         struct hclge_desc desc[3];
8207         int status;
8208
8209         /* mac addr check */
8210         if (!is_multicast_ether_addr(addr)) {
8211                 dev_err(&hdev->pdev->dev,
8212                         "Add mc mac err! invalid mac:%pM.\n",
8213                          addr);
8214                 return -EINVAL;
8215         }
8216         memset(&req, 0, sizeof(req));
8217         hclge_prepare_mac_addr(&req, addr, true);
8218         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8219         if (status) {
8220                 /* This mac addr does not exist, add a new entry for it */
8221                 memset(desc[0].data, 0, sizeof(desc[0].data));
8222                 memset(desc[1].data, 0, sizeof(desc[0].data));
8223                 memset(desc[2].data, 0, sizeof(desc[0].data));
8224         }
8225         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8226         if (status)
8227                 return status;
8228         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8229
8230         /* if the table has already overflowed, do not print the error each time */
8231         if (status == -ENOSPC &&
8232             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8233                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8234
8235         return status;
8236 }
8237
8238 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8239                             const unsigned char *addr)
8240 {
8241         struct hclge_vport *vport = hclge_get_vport(handle);
8242
8243         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8244                                      addr);
8245 }
8246
8247 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8248                             const unsigned char *addr)
8249 {
8250         struct hclge_dev *hdev = vport->back;
8251         struct hclge_mac_vlan_tbl_entry_cmd req;
8252         enum hclge_cmd_status status;
8253         struct hclge_desc desc[3];
8254
8255         /* mac addr check */
8256         if (!is_multicast_ether_addr(addr)) {
8257                 dev_dbg(&hdev->pdev->dev,
8258                         "Remove mc mac err! invalid mac:%pM.\n",
8259                          addr);
8260                 return -EINVAL;
8261         }
8262
8263         memset(&req, 0, sizeof(req));
8264         hclge_prepare_mac_addr(&req, addr, true);
8265         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8266         if (!status) {
8267                 /* This mac addr exists, remove this handle's VFID from it */
8268                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8269                 if (status)
8270                         return status;
8271
8272                 if (hclge_is_all_function_id_zero(desc))
8273                         /* All the vfids are zero, so delete this entry */
8274                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8275                 else
8276                         /* Not all the vfids are zero, just update the vfids */
8277                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8278
8279         } else if (status == -ENOENT) {
8280                 status = 0;
8281         }
8282
8283         return status;
8284 }
8285
8286 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8287                                       struct list_head *list,
8288                                       int (*sync)(struct hclge_vport *,
8289                                                   const unsigned char *))
8290 {
8291         struct hclge_mac_node *mac_node, *tmp;
8292         int ret;
8293
8294         list_for_each_entry_safe(mac_node, tmp, list, node) {
8295                 ret = sync(vport, mac_node->mac_addr);
8296                 if (!ret) {
8297                         mac_node->state = HCLGE_MAC_ACTIVE;
8298                 } else {
8299                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8300                                 &vport->state);
8301                         break;
8302                 }
8303         }
8304 }
8305
8306 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8307                                         struct list_head *list,
8308                                         int (*unsync)(struct hclge_vport *,
8309                                                       const unsigned char *))
8310 {
8311         struct hclge_mac_node *mac_node, *tmp;
8312         int ret;
8313
8314         list_for_each_entry_safe(mac_node, tmp, list, node) {
8315                 ret = unsync(vport, mac_node->mac_addr);
8316                 if (!ret || ret == -ENOENT) {
8317                         list_del(&mac_node->node);
8318                         kfree(mac_node);
8319                 } else {
8320                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8321                                 &vport->state);
8322                         break;
8323                 }
8324         }
8325 }
8326
8327 static bool hclge_sync_from_add_list(struct list_head *add_list,
8328                                      struct list_head *mac_list)
8329 {
8330         struct hclge_mac_node *mac_node, *tmp, *new_node;
8331         bool all_added = true;
8332
8333         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8334                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8335                         all_added = false;
8336
8337                 /* If the mac address from tmp_add_list is not in the
8338                  * uc/mc_mac_list, it means a TO_DEL request was received
8339                  * while this address was being added to the mac table.
8340                  * If the mac_node state is ACTIVE, change it to TO_DEL so
8341                  * it will be removed next time; otherwise it must be
8342                  * TO_ADD, meaning this address never reached the mac
8343                  * table, so just remove the mac node.
8344                  */
8345                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8346                 if (new_node) {
8347                         hclge_update_mac_node(new_node, mac_node->state);
8348                         list_del(&mac_node->node);
8349                         kfree(mac_node);
8350                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8351                         mac_node->state = HCLGE_MAC_TO_DEL;
8352                         list_del(&mac_node->node);
8353                         list_add_tail(&mac_node->node, mac_list);
8354                 } else {
8355                         list_del(&mac_node->node);
8356                         kfree(mac_node);
8357                 }
8358         }
8359
8360         return all_added;
8361 }
8362
8363 static void hclge_sync_from_del_list(struct list_head *del_list,
8364                                      struct list_head *mac_list)
8365 {
8366         struct hclge_mac_node *mac_node, *tmp, *new_node;
8367
8368         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8369                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8370                 if (new_node) {
8371                         /* If the mac addr exists in the mac list, a new
8372                          * TO_ADD request was received during the time window
8373                          * of configuring the mac address. Since the mac node
8374                          * state is TO_ADD and the address is still present in
8375                          * the hardware (because the delete failed), we just
8376                          * need to change the mac node state to ACTIVE.
8377                          */
8378                         new_node->state = HCLGE_MAC_ACTIVE;
8379                         list_del(&mac_node->node);
8380                         kfree(mac_node);
8381                 } else {
8382                         list_del(&mac_node->node);
8383                         list_add_tail(&mac_node->node, mac_list);
8384                 }
8385         }
8386 }
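
/* Illustrative sketch (not part of the driver): hclge_sync_from_add_list()
 * and hclge_sync_from_del_list() above reconcile the temporary lists against
 * the main uc/mc list using the three node states. The standalone example
 * below mimics only the decision taken for a node coming back from the
 * temporary add list; the enum and helper names are hypothetical
 * simplifications of that logic.
 */
#if 0	/* example only, never built as part of the driver */
#include <stdbool.h>

enum example_mac_state { EX_MAC_TO_ADD, EX_MAC_TO_DEL, EX_MAC_ACTIVE };

enum example_merge_action {
	EX_KEEP_IN_MAIN_LIST,	/* address still wanted, main list entry wins */
	EX_MOVE_BACK_AS_TO_DEL,	/* a delete raced in after the hardware add */
	EX_FREE_NODE,		/* never reached hardware, nothing to undo */
};

static enum example_merge_action
example_merge_add_entry(bool still_in_main_list, enum example_mac_state state)
{
	if (still_in_main_list)
		return EX_KEEP_IN_MAIN_LIST;
	if (state == EX_MAC_ACTIVE)
		return EX_MOVE_BACK_AS_TO_DEL;
	return EX_FREE_NODE;
}
#endif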
8387
8388 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8389                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8390                                         bool is_all_added)
8391 {
8392         if (mac_type == HCLGE_MAC_ADDR_UC) {
8393                 if (is_all_added)
8394                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8395                 else
8396                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8397         } else {
8398                 if (is_all_added)
8399                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8400                 else
8401                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8402         }
8403 }
8404
8405 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8406                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8407 {
8408         struct hclge_mac_node *mac_node, *tmp, *new_node;
8409         struct list_head tmp_add_list, tmp_del_list;
8410         struct list_head *list;
8411         bool all_added;
8412
8413         INIT_LIST_HEAD(&tmp_add_list);
8414         INIT_LIST_HEAD(&tmp_del_list);
8415
8416         /* move the mac addrs to the tmp_add_list and tmp_del_list, then
8417          * we can add/delete these mac addrs outside the spin lock
8418          */
8419         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8420                 &vport->uc_mac_list : &vport->mc_mac_list;
8421
8422         spin_lock_bh(&vport->mac_list_lock);
8423
8424         list_for_each_entry_safe(mac_node, tmp, list, node) {
8425                 switch (mac_node->state) {
8426                 case HCLGE_MAC_TO_DEL:
8427                         list_del(&mac_node->node);
8428                         list_add_tail(&mac_node->node, &tmp_del_list);
8429                         break;
8430                 case HCLGE_MAC_TO_ADD:
8431                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8432                         if (!new_node)
8433                                 goto stop_traverse;
8434                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8435                         new_node->state = mac_node->state;
8436                         list_add_tail(&new_node->node, &tmp_add_list);
8437                         break;
8438                 default:
8439                         break;
8440                 }
8441         }
8442
8443 stop_traverse:
8444         spin_unlock_bh(&vport->mac_list_lock);
8445
8446         /* delete first, in order to get max mac table space for adding */
8447         if (mac_type == HCLGE_MAC_ADDR_UC) {
8448                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8449                                             hclge_rm_uc_addr_common);
8450                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8451                                           hclge_add_uc_addr_common);
8452         } else {
8453                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8454                                             hclge_rm_mc_addr_common);
8455                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8456                                           hclge_add_mc_addr_common);
8457         }
8458
8459         /* if adding/deleting some mac addresses failed, move them back to
8460          * the mac_list and retry next time.
8461          */
8462         spin_lock_bh(&vport->mac_list_lock);
8463
8464         hclge_sync_from_del_list(&tmp_del_list, list);
8465         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8466
8467         spin_unlock_bh(&vport->mac_list_lock);
8468
8469         hclge_update_overflow_flags(vport, mac_type, all_added);
8470 }
8471
8472 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8473 {
8474         struct hclge_dev *hdev = vport->back;
8475
8476         if (test_bit(vport->vport_id, hdev->vport_config_block))
8477                 return false;
8478
8479         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8480                 return true;
8481
8482         return false;
8483 }
8484
8485 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8486 {
8487         int i;
8488
8489         for (i = 0; i < hdev->num_alloc_vport; i++) {
8490                 struct hclge_vport *vport = &hdev->vport[i];
8491
8492                 if (!hclge_need_sync_mac_table(vport))
8493                         continue;
8494
8495                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8496                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8497         }
8498 }
8499
8500 static void hclge_build_del_list(struct list_head *list,
8501                                  bool is_del_list,
8502                                  struct list_head *tmp_del_list)
8503 {
8504         struct hclge_mac_node *mac_cfg, *tmp;
8505
8506         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8507                 switch (mac_cfg->state) {
8508                 case HCLGE_MAC_TO_DEL:
8509                 case HCLGE_MAC_ACTIVE:
8510                         list_del(&mac_cfg->node);
8511                         list_add_tail(&mac_cfg->node, tmp_del_list);
8512                         break;
8513                 case HCLGE_MAC_TO_ADD:
8514                         if (is_del_list) {
8515                                 list_del(&mac_cfg->node);
8516                                 kfree(mac_cfg);
8517                         }
8518                         break;
8519                 }
8520         }
8521 }
8522
8523 static void hclge_unsync_del_list(struct hclge_vport *vport,
8524                                   int (*unsync)(struct hclge_vport *vport,
8525                                                 const unsigned char *addr),
8526                                   bool is_del_list,
8527                                   struct list_head *tmp_del_list)
8528 {
8529         struct hclge_mac_node *mac_cfg, *tmp;
8530         int ret;
8531
8532         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8533                 ret = unsync(vport, mac_cfg->mac_addr);
8534                 if (!ret || ret == -ENOENT) {
8535                         /* clear all mac addrs from hardware, but keep them
8536                          * in the mac list so they can be restored after the
8537                          * vf reset finishes.
8538                          */
8539                         if (!is_del_list &&
8540                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
8541                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8542                         } else {
8543                                 list_del(&mac_cfg->node);
8544                                 kfree(mac_cfg);
8545                         }
8546                 } else if (is_del_list) {
8547                         mac_cfg->state = HCLGE_MAC_TO_DEL;
8548                 }
8549         }
8550 }
8551
8552 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8553                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8554 {
8555         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8556         struct hclge_dev *hdev = vport->back;
8557         struct list_head tmp_del_list, *list;
8558
8559         if (mac_type == HCLGE_MAC_ADDR_UC) {
8560                 list = &vport->uc_mac_list;
8561                 unsync = hclge_rm_uc_addr_common;
8562         } else {
8563                 list = &vport->mc_mac_list;
8564                 unsync = hclge_rm_mc_addr_common;
8565         }
8566
8567         INIT_LIST_HEAD(&tmp_del_list);
8568
8569         if (!is_del_list)
8570                 set_bit(vport->vport_id, hdev->vport_config_block);
8571
8572         spin_lock_bh(&vport->mac_list_lock);
8573
8574         hclge_build_del_list(list, is_del_list, &tmp_del_list);
8575
8576         spin_unlock_bh(&vport->mac_list_lock);
8577
8578         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8579
8580         spin_lock_bh(&vport->mac_list_lock);
8581
8582         hclge_sync_from_del_list(&tmp_del_list, list);
8583
8584         spin_unlock_bh(&vport->mac_list_lock);
8585 }
8586
8587 /* remove all mac addresses when uninitializing */
8588 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8589                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
8590 {
8591         struct hclge_mac_node *mac_node, *tmp;
8592         struct hclge_dev *hdev = vport->back;
8593         struct list_head tmp_del_list, *list;
8594
8595         INIT_LIST_HEAD(&tmp_del_list);
8596
8597         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8598                 &vport->uc_mac_list : &vport->mc_mac_list;
8599
8600         spin_lock_bh(&vport->mac_list_lock);
8601
8602         list_for_each_entry_safe(mac_node, tmp, list, node) {
8603                 switch (mac_node->state) {
8604                 case HCLGE_MAC_TO_DEL:
8605                 case HCLGE_MAC_ACTIVE:
8606                         list_del(&mac_node->node);
8607                         list_add_tail(&mac_node->node, &tmp_del_list);
8608                         break;
8609                 case HCLGE_MAC_TO_ADD:
8610                         list_del(&mac_node->node);
8611                         kfree(mac_node);
8612                         break;
8613                 }
8614         }
8615
8616         spin_unlock_bh(&vport->mac_list_lock);
8617
8618         if (mac_type == HCLGE_MAC_ADDR_UC)
8619                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8620                                             hclge_rm_uc_addr_common);
8621         else
8622                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8623                                             hclge_rm_mc_addr_common);
8624
8625         if (!list_empty(&tmp_del_list))
8626                 dev_warn(&hdev->pdev->dev,
8627                          "failed to completely uninit %s mac list for vport %u\n",
8628                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8629                          vport->vport_id);
8630
8631         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8632                 list_del(&mac_node->node);
8633                 kfree(mac_node);
8634         }
8635 }
8636
8637 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8638 {
8639         struct hclge_vport *vport;
8640         int i;
8641
8642         for (i = 0; i < hdev->num_alloc_vport; i++) {
8643                 vport = &hdev->vport[i];
8644                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8645                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8646         }
8647 }
8648
8649 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8650                                               u16 cmdq_resp, u8 resp_code)
8651 {
8652 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
8653 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
8654 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
8655 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
8656
8657         int return_status;
8658
8659         if (cmdq_resp) {
8660                 dev_err(&hdev->pdev->dev,
8661                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8662                         cmdq_resp);
8663                 return -EIO;
8664         }
8665
8666         switch (resp_code) {
8667         case HCLGE_ETHERTYPE_SUCCESS_ADD:
8668         case HCLGE_ETHERTYPE_ALREADY_ADD:
8669                 return_status = 0;
8670                 break;
8671         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8672                 dev_err(&hdev->pdev->dev,
8673                         "add mac ethertype failed for manager table overflow.\n");
8674                 return_status = -EIO;
8675                 break;
8676         case HCLGE_ETHERTYPE_KEY_CONFLICT:
8677                 dev_err(&hdev->pdev->dev,
8678                         "add mac ethertype failed for key conflict.\n");
8679                 return_status = -EIO;
8680                 break;
8681         default:
8682                 dev_err(&hdev->pdev->dev,
8683                         "add mac ethertype failed for undefined, code=%u.\n",
8684                         resp_code);
8685                 return_status = -EIO;
8686         }
8687
8688         return return_status;
8689 }
8690
8691 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8692                                      u8 *mac_addr)
8693 {
8694         struct hclge_mac_vlan_tbl_entry_cmd req;
8695         struct hclge_dev *hdev = vport->back;
8696         struct hclge_desc desc;
8697         u16 egress_port = 0;
8698         int i;
8699
8700         if (is_zero_ether_addr(mac_addr))
8701                 return false;
8702
8703         memset(&req, 0, sizeof(req));
8704         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8705                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8706         req.egress_port = cpu_to_le16(egress_port);
8707         hclge_prepare_mac_addr(&req, mac_addr, false);
8708
8709         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8710                 return true;
8711
8712         vf_idx += HCLGE_VF_VPORT_START_NUM;
8713         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8714                 if (i != vf_idx &&
8715                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8716                         return true;
8717
8718         return false;
8719 }
8720
8721 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8722                             u8 *mac_addr)
8723 {
8724         struct hclge_vport *vport = hclge_get_vport(handle);
8725         struct hclge_dev *hdev = vport->back;
8726
8727         vport = hclge_get_vf_vport(hdev, vf);
8728         if (!vport)
8729                 return -EINVAL;
8730
8731         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8732                 dev_info(&hdev->pdev->dev,
8733                          "Specified MAC(=%pM) is same as before, no change committed!\n",
8734                          mac_addr);
8735                 return 0;
8736         }
8737
8738         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8739                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8740                         mac_addr);
8741                 return -EEXIST;
8742         }
8743
8744         ether_addr_copy(vport->vf_info.mac, mac_addr);
8745
8746         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8747                 dev_info(&hdev->pdev->dev,
8748                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8749                          vf, mac_addr);
8750                 return hclge_inform_reset_assert_to_vf(vport);
8751         }
8752
8753         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8754                  vf, mac_addr);
8755         return 0;
8756 }
8757
8758 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8759                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
8760 {
8761         struct hclge_desc desc;
8762         u8 resp_code;
8763         u16 retval;
8764         int ret;
8765
8766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8767         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8768
8769         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8770         if (ret) {
8771                 dev_err(&hdev->pdev->dev,
8772                         "add mac ethertype failed for cmd_send, ret =%d.\n",
8773                         ret);
8774                 return ret;
8775         }
8776
8777         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8778         retval = le16_to_cpu(desc.retval);
8779
8780         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8781 }
8782
8783 static int init_mgr_tbl(struct hclge_dev *hdev)
8784 {
8785         int ret;
8786         int i;
8787
8788         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8789                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8790                 if (ret) {
8791                         dev_err(&hdev->pdev->dev,
8792                                 "add mac ethertype failed, ret =%d.\n",
8793                                 ret);
8794                         return ret;
8795                 }
8796         }
8797
8798         return 0;
8799 }
8800
8801 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8802 {
8803         struct hclge_vport *vport = hclge_get_vport(handle);
8804         struct hclge_dev *hdev = vport->back;
8805
8806         ether_addr_copy(p, hdev->hw.mac.mac_addr);
8807 }
8808
8809 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8810                                        const u8 *old_addr, const u8 *new_addr)
8811 {
8812         struct list_head *list = &vport->uc_mac_list;
8813         struct hclge_mac_node *old_node, *new_node;
8814
8815         new_node = hclge_find_mac_node(list, new_addr);
8816         if (!new_node) {
8817                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8818                 if (!new_node)
8819                         return -ENOMEM;
8820
8821                 new_node->state = HCLGE_MAC_TO_ADD;
8822                 ether_addr_copy(new_node->mac_addr, new_addr);
8823                 list_add(&new_node->node, list);
8824         } else {
8825                 if (new_node->state == HCLGE_MAC_TO_DEL)
8826                         new_node->state = HCLGE_MAC_ACTIVE;
8827
8828                 /* make sure the new addr is at the list head, to avoid the
8829                  * dev addr not being re-added into the mac table due to the
8830                  * umv space limitation after a global/imp reset, which clears
8831                  * the mac table in hardware.
8832                  */
8833                 list_move(&new_node->node, list);
8834         }
8835
8836         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8837                 old_node = hclge_find_mac_node(list, old_addr);
8838                 if (old_node) {
8839                         if (old_node->state == HCLGE_MAC_TO_ADD) {
8840                                 list_del(&old_node->node);
8841                                 kfree(old_node);
8842                         } else {
8843                                 old_node->state = HCLGE_MAC_TO_DEL;
8844                         }
8845                 }
8846         }
8847
8848         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8849
8850         return 0;
8851 }
8852
8853 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8854                               bool is_first)
8855 {
8856         const unsigned char *new_addr = (const unsigned char *)p;
8857         struct hclge_vport *vport = hclge_get_vport(handle);
8858         struct hclge_dev *hdev = vport->back;
8859         unsigned char *old_addr = NULL;
8860         int ret;
8861
8862         /* mac addr check */
8863         if (is_zero_ether_addr(new_addr) ||
8864             is_broadcast_ether_addr(new_addr) ||
8865             is_multicast_ether_addr(new_addr)) {
8866                 dev_err(&hdev->pdev->dev,
8867                         "change uc mac err! invalid mac: %pM.\n",
8868                          new_addr);
8869                 return -EINVAL;
8870         }
8871
8872         ret = hclge_pause_addr_cfg(hdev, new_addr);
8873         if (ret) {
8874                 dev_err(&hdev->pdev->dev,
8875                         "failed to configure mac pause address, ret = %d\n",
8876                         ret);
8877                 return ret;
8878         }
8879
8880         if (!is_first)
8881                 old_addr = hdev->hw.mac.mac_addr;
8882
8883         spin_lock_bh(&vport->mac_list_lock);
8884         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8885         if (ret) {
8886                 dev_err(&hdev->pdev->dev,
8887                         "failed to change the mac addr:%pM, ret = %d\n",
8888                         new_addr, ret);
8889                 spin_unlock_bh(&vport->mac_list_lock);
8890
8891                 if (!is_first)
8892                         hclge_pause_addr_cfg(hdev, old_addr);
8893
8894                 return ret;
8895         }
8896         /* we must update the dev addr under the spin lock, to prevent the
8897          * dev addr from being removed by the set_rx_mode path.
8898          */
8899         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8900         spin_unlock_bh(&vport->mac_list_lock);
8901
8902         hclge_task_schedule(hdev, 0);
8903
8904         return 0;
8905 }
8906
8907 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8908                           int cmd)
8909 {
8910         struct hclge_vport *vport = hclge_get_vport(handle);
8911         struct hclge_dev *hdev = vport->back;
8912
8913         if (!hdev->hw.mac.phydev)
8914                 return -EOPNOTSUPP;
8915
8916         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8917 }
8918
8919 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8920                                       u8 fe_type, bool filter_en, u8 vf_id)
8921 {
8922         struct hclge_vlan_filter_ctrl_cmd *req;
8923         struct hclge_desc desc;
8924         int ret;
8925
8926         /* read current vlan filter parameter */
8927         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8928         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8929         req->vlan_type = vlan_type;
8930         req->vf_id = vf_id;
8931
8932         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8933         if (ret) {
8934                 dev_err(&hdev->pdev->dev,
8935                         "failed to get vlan filter config, ret = %d.\n", ret);
8936                 return ret;
8937         }
8938
8939         /* modify and write new config parameter */
8940         hclge_cmd_reuse_desc(&desc, false);
8941         req->vlan_fe = filter_en ?
8942                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8943
8944         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8945         if (ret)
8946                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8947                         ret);
8948
8949         return ret;
8950 }
8951
8952 #define HCLGE_FILTER_TYPE_VF            0
8953 #define HCLGE_FILTER_TYPE_PORT          1
8954 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
8955 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
8956 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
8957 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
8958 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
8959 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
8960                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
8961 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
8962                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
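
/* Illustrative sketch (not part of the driver): hclge_set_vlan_filter_ctrl()
 * above does a read-modify-write of the vlan_fe field, so enabling or
 * disabling one group of filter-enable bits leaves the other bits untouched.
 * The standalone helper below shows only that bit update; treating vlan_fe
 * as a u8 here is an assumption made for the example.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdbool.h>

static uint8_t example_update_vlan_fe(uint8_t vlan_fe, uint8_t fe_type,
				      bool filter_en)
{
	return filter_en ? (vlan_fe | fe_type) : (vlan_fe & ~fe_type);
}

/* e.g. starting from 0, enabling HCLGE_FILTER_FE_EGRESS (bits 1 and 3) gives
 * 0x0a, and disabling HCLGE_FILTER_FE_INGRESS (bits 0 and 2) afterwards
 * leaves it at 0x0a.
 */
#endif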
8963
8964 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8965 {
8966         struct hclge_vport *vport = hclge_get_vport(handle);
8967         struct hclge_dev *hdev = vport->back;
8968
8969         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8970                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8971                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
8972                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8973                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
8974         } else {
8975                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8976                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8977                                            0);
8978         }
8979         if (enable)
8980                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8981         else
8982                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8983 }
8984
8985 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
8986                                         bool is_kill, u16 vlan,
8987                                         struct hclge_desc *desc)
8988 {
8989         struct hclge_vlan_filter_vf_cfg_cmd *req0;
8990         struct hclge_vlan_filter_vf_cfg_cmd *req1;
8991         u8 vf_byte_val;
8992         u8 vf_byte_off;
8993         int ret;
8994
8995         hclge_cmd_setup_basic_desc(&desc[0],
8996                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8997         hclge_cmd_setup_basic_desc(&desc[1],
8998                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8999
9000         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9001
9002         vf_byte_off = vfid / 8;
9003         vf_byte_val = 1 << (vfid % 8);
9004
9005         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9006         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9007
9008         req0->vlan_id  = cpu_to_le16(vlan);
9009         req0->vlan_cfg = is_kill;
9010
9011         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9012                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9013         else
9014                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9015
9016         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9017         if (ret) {
9018                 dev_err(&hdev->pdev->dev,
9019                         "Send vf vlan command fail, ret =%d.\n",
9020                         ret);
9021                 return ret;
9022         }
9023
9024         return 0;
9025 }
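
/* Illustrative sketch (not part of the driver): the vf bitmap built above
 * packs one bit per function id, eight ids per byte, and spills into the
 * second descriptor once the byte offset reaches HCLGE_MAX_VF_BYTES. The
 * standalone program below reproduces only that index math; using 16 for
 * HCLGE_MAX_VF_BYTES is an assumption made for the example.
 */
#if 0	/* example only */
#include <stdio.h>

#define EX_MAX_VF_BYTES	16	/* assumed value of HCLGE_MAX_VF_BYTES */

static void example_vf_bitmap_pos(unsigned int vfid)
{
	unsigned int byte_off = vfid / 8;		/* bitmap byte */
	unsigned int bit_val = 1u << (vfid % 8);	/* bit in that byte */
	unsigned int desc = byte_off < EX_MAX_VF_BYTES ? 0 : 1;
	unsigned int idx = desc ? byte_off - EX_MAX_VF_BYTES : byte_off;

	printf("vfid %u -> desc[%u].vf_bitmap[%u] |= 0x%02x\n",
	       vfid, desc, idx, bit_val);
}

int main(void)
{
	example_vf_bitmap_pos(5);	/* desc[0].vf_bitmap[0] |= 0x20 */
	example_vf_bitmap_pos(130);	/* desc[1].vf_bitmap[0] |= 0x04 */
	return 0;
}
#endif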
9026
9027 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9028                                           bool is_kill, struct hclge_desc *desc)
9029 {
9030         struct hclge_vlan_filter_vf_cfg_cmd *req;
9031
9032         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9033
9034         if (!is_kill) {
9035 #define HCLGE_VF_VLAN_NO_ENTRY  2
9036                 if (!req->resp_code || req->resp_code == 1)
9037                         return 0;
9038
9039                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9040                         set_bit(vfid, hdev->vf_vlan_full);
9041                         dev_warn(&hdev->pdev->dev,
9042                                  "vf vlan table is full, vf vlan filter is disabled\n");
9043                         return 0;
9044                 }
9045
9046                 dev_err(&hdev->pdev->dev,
9047                         "Add vf vlan filter fail, ret =%u.\n",
9048                         req->resp_code);
9049         } else {
9050 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9051                 if (!req->resp_code)
9052                         return 0;
9053
9054                 /* vf vlan filter is disabled when the vf vlan table is full,
9055                  * so new vlan ids will not be added into the vf vlan table.
9056                  * Just return 0 without warning, to avoid massive verbose
9057                  * print logs when unloading.
9058                  */
9059                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9060                         return 0;
9061
9062                 dev_err(&hdev->pdev->dev,
9063                         "Kill vf vlan filter fail, ret =%u.\n",
9064                         req->resp_code);
9065         }
9066
9067         return -EIO;
9068 }
9069
9070 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9071                                     bool is_kill, u16 vlan,
9072                                     __be16 proto)
9073 {
9074         struct hclge_vport *vport = &hdev->vport[vfid];
9075         struct hclge_desc desc[2];
9076         int ret;
9077
9078         /* if the vf vlan table is full, firmware disables the vf vlan filter,
9079          * so it is neither possible nor necessary to add new vlan ids to it.
9080          * If spoof check is enabled and the vf vlan table is full, a new vlan
9081          * must not be added, because tx packets with that vlan id are dropped.
9082          */
9083         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9084                 if (vport->vf_info.spoofchk && vlan) {
9085                         dev_err(&hdev->pdev->dev,
9086                                 "Can't add vlan because spoof check is on and the vf vlan table is full\n");
9087                         return -EPERM;
9088                 }
9089                 return 0;
9090         }
9091
9092         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9093         if (ret)
9094                 return ret;
9095
9096         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9097 }
9098
9099 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9100                                       u16 vlan_id, bool is_kill)
9101 {
9102         struct hclge_vlan_filter_pf_cfg_cmd *req;
9103         struct hclge_desc desc;
9104         u8 vlan_offset_byte_val;
9105         u8 vlan_offset_byte;
9106         u8 vlan_offset_160;
9107         int ret;
9108
9109         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9110
9111         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9112         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9113                            HCLGE_VLAN_BYTE_SIZE;
9114         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9115
9116         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9117         req->vlan_offset = vlan_offset_160;
9118         req->vlan_cfg = is_kill;
9119         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9120
9121         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9122         if (ret)
9123                 dev_err(&hdev->pdev->dev,
9124                         "port vlan command, send fail, ret =%d.\n", ret);
9125         return ret;
9126 }
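
/* Illustrative sketch (not part of the driver): the pf vlan filter command
 * above addresses one window of vlan ids at a time and sets a single bit
 * inside that window. The standalone program below reproduces only the
 * offset math; the step of 160 and byte size of 8 are assumptions suggested
 * by the names vlan_offset_160, HCLGE_VLAN_ID_OFFSET_STEP and
 * HCLGE_VLAN_BYTE_SIZE.
 */
#if 0	/* example only */
#include <stdio.h>

#define EX_VLAN_ID_OFFSET_STEP	160	/* assumed HCLGE_VLAN_ID_OFFSET_STEP */
#define EX_VLAN_BYTE_SIZE	8	/* assumed HCLGE_VLAN_BYTE_SIZE */

static void example_pf_vlan_pos(unsigned int vlan_id)
{
	unsigned int window = vlan_id / EX_VLAN_ID_OFFSET_STEP;
	unsigned int byte = (vlan_id % EX_VLAN_ID_OFFSET_STEP) / EX_VLAN_BYTE_SIZE;
	unsigned int bit = 1u << (vlan_id % EX_VLAN_BYTE_SIZE);

	printf("vlan %u -> vlan_offset %u, bitmap byte %u, bit 0x%02x\n",
	       vlan_id, window, byte, bit);
}

int main(void)
{
	example_pf_vlan_pos(100);	/* vlan_offset 0, byte 12, bit 0x10 */
	example_pf_vlan_pos(1000);	/* vlan_offset 6, byte 5, bit 0x01 */
	return 0;
}
#endif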
9127
9128 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9129                                     u16 vport_id, u16 vlan_id,
9130                                     bool is_kill)
9131 {
9132         u16 vport_idx, vport_num = 0;
9133         int ret;
9134
9135         if (is_kill && !vlan_id)
9136                 return 0;
9137
9138         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
9139                                        proto);
9140         if (ret) {
9141                 dev_err(&hdev->pdev->dev,
9142                         "Set %u vport vlan filter config fail, ret =%d.\n",
9143                         vport_id, ret);
9144                 return ret;
9145         }
9146
9147         /* vlan 0 may be added twice when 8021q module is enabled */
9148         if (!is_kill && !vlan_id &&
9149             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9150                 return 0;
9151
9152         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9153                 dev_err(&hdev->pdev->dev,
9154                         "Add port vlan failed, vport %u is already in vlan %u\n",
9155                         vport_id, vlan_id);
9156                 return -EINVAL;
9157         }
9158
9159         if (is_kill &&
9160             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9161                 dev_err(&hdev->pdev->dev,
9162                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9163                         vport_id, vlan_id);
9164                 return -EINVAL;
9165         }
9166
9167         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9168                 vport_num++;
9169
9170         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9171                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9172                                                  is_kill);
9173
9174         return ret;
9175 }
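
/* Illustrative sketch (not part of the driver): hclge_set_vlan_filter_hw()
 * above uses the per-vlan vport bitmap as a reference count, so the
 * port-level filter is only touched on the first add or the last remove of
 * a vlan id. The standalone helper below expresses just that decision;
 * names are hypothetical.
 */
#if 0	/* example only */
#include <stdbool.h>

/* vports_left: number of vports still using the vlan after the bitmap has
 * been updated, mirroring how vport_num is counted above.
 */
static bool example_need_port_filter_update(bool is_kill,
					    unsigned int vports_left)
{
	if (is_kill)
		return vports_left == 0;	/* last vport left the vlan */
	return vports_left == 1;		/* first vport joined the vlan */
}
#endif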
9176
9177 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9178 {
9179         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9180         struct hclge_vport_vtag_tx_cfg_cmd *req;
9181         struct hclge_dev *hdev = vport->back;
9182         struct hclge_desc desc;
9183         u16 bmap_index;
9184         int status;
9185
9186         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9187
9188         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9189         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9190         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9191         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9192                       vcfg->accept_tag1 ? 1 : 0);
9193         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9194                       vcfg->accept_untag1 ? 1 : 0);
9195         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9196                       vcfg->accept_tag2 ? 1 : 0);
9197         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9198                       vcfg->accept_untag2 ? 1 : 0);
9199         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9200                       vcfg->insert_tag1_en ? 1 : 0);
9201         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9202                       vcfg->insert_tag2_en ? 1 : 0);
9203         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9204                       vcfg->tag_shift_mode_en ? 1 : 0);
9205         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9206
9207         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9208         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9209                         HCLGE_VF_NUM_PER_BYTE;
9210         req->vf_bitmap[bmap_index] =
9211                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9212
9213         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9214         if (status)
9215                 dev_err(&hdev->pdev->dev,
9216                         "Send port txvlan cfg command fail, ret =%d\n",
9217                         status);
9218
9219         return status;
9220 }
9221
9222 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9223 {
9224         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9225         struct hclge_vport_vtag_rx_cfg_cmd *req;
9226         struct hclge_dev *hdev = vport->back;
9227         struct hclge_desc desc;
9228         u16 bmap_index;
9229         int status;
9230
9231         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9232
9233         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9234         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9235                       vcfg->strip_tag1_en ? 1 : 0);
9236         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9237                       vcfg->strip_tag2_en ? 1 : 0);
9238         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9239                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9240         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9241                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9242         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9243                       vcfg->strip_tag1_discard_en ? 1 : 0);
9244         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9245                       vcfg->strip_tag2_discard_en ? 1 : 0);
9246
9247         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9248         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9249                         HCLGE_VF_NUM_PER_BYTE;
9250         req->vf_bitmap[bmap_index] =
9251                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9252
9253         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9254         if (status)
9255                 dev_err(&hdev->pdev->dev,
9256                         "Send port rxvlan cfg command fail, ret =%d\n",
9257                         status);
9258
9259         return status;
9260 }
9261
9262 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9263                                   u16 port_base_vlan_state,
9264                                   u16 vlan_tag)
9265 {
9266         int ret;
9267
9268         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9269                 vport->txvlan_cfg.accept_tag1 = true;
9270                 vport->txvlan_cfg.insert_tag1_en = false;
9271                 vport->txvlan_cfg.default_tag1 = 0;
9272         } else {
9273                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9274
9275                 vport->txvlan_cfg.accept_tag1 =
9276                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9277                 vport->txvlan_cfg.insert_tag1_en = true;
9278                 vport->txvlan_cfg.default_tag1 = vlan_tag;
9279         }
9280
9281         vport->txvlan_cfg.accept_untag1 = true;
9282
9283         /* accept_tag2 and accept_untag2 are not supported on
9284          * pdev revision(0x20); newer revisions support them, but
9285          * these two fields can not be configured by the user.
9286          */
9287         vport->txvlan_cfg.accept_tag2 = true;
9288         vport->txvlan_cfg.accept_untag2 = true;
9289         vport->txvlan_cfg.insert_tag2_en = false;
9290         vport->txvlan_cfg.default_tag2 = 0;
9291         vport->txvlan_cfg.tag_shift_mode_en = true;
9292
9293         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9294                 vport->rxvlan_cfg.strip_tag1_en = false;
9295                 vport->rxvlan_cfg.strip_tag2_en =
9296                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9297                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9298         } else {
9299                 vport->rxvlan_cfg.strip_tag1_en =
9300                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9301                 vport->rxvlan_cfg.strip_tag2_en = true;
9302                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9303         }
9304
9305         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9306         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9307         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9308
9309         ret = hclge_set_vlan_tx_offload_cfg(vport);
9310         if (ret)
9311                 return ret;
9312
9313         return hclge_set_vlan_rx_offload_cfg(vport);
9314 }
9315
9316 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9317 {
9318         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9319         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9320         struct hclge_desc desc;
9321         int status;
9322
9323         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9324         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9325         rx_req->ot_fst_vlan_type =
9326                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9327         rx_req->ot_sec_vlan_type =
9328                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9329         rx_req->in_fst_vlan_type =
9330                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9331         rx_req->in_sec_vlan_type =
9332                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9333
9334         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9335         if (status) {
9336                 dev_err(&hdev->pdev->dev,
9337                         "Send rxvlan protocol type command fail, ret =%d\n",
9338                         status);
9339                 return status;
9340         }
9341
9342         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9343
9344         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9345         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9346         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9347
9348         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9349         if (status)
9350                 dev_err(&hdev->pdev->dev,
9351                         "Send txvlan protocol type command fail, ret =%d\n",
9352                         status);
9353
9354         return status;
9355 }
9356
9357 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9358 {
9359 #define HCLGE_DEF_VLAN_TYPE             0x8100
9360
9361         struct hnae3_handle *handle = &hdev->vport[0].nic;
9362         struct hclge_vport *vport;
9363         int ret;
9364         int i;
9365
9366         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9367                 /* for revision 0x21, vf vlan filter is per function */
9368                 for (i = 0; i < hdev->num_alloc_vport; i++) {
9369                         vport = &hdev->vport[i];
9370                         ret = hclge_set_vlan_filter_ctrl(hdev,
9371                                                          HCLGE_FILTER_TYPE_VF,
9372                                                          HCLGE_FILTER_FE_EGRESS,
9373                                                          true,
9374                                                          vport->vport_id);
9375                         if (ret)
9376                                 return ret;
9377                 }
9378
9379                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9380                                                  HCLGE_FILTER_FE_INGRESS, true,
9381                                                  0);
9382                 if (ret)
9383                         return ret;
9384         } else {
9385                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9386                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
9387                                                  true, 0);
9388                 if (ret)
9389                         return ret;
9390         }
9391
9392         handle->netdev_flags |= HNAE3_VLAN_FLTR;
9393
9394         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9395         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9396         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9397         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9398         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9399         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9400
9401         ret = hclge_set_vlan_protocol_type(hdev);
9402         if (ret)
9403                 return ret;
9404
9405         for (i = 0; i < hdev->num_alloc_vport; i++) {
9406                 u16 vlan_tag;
9407
9408                 vport = &hdev->vport[i];
9409                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9410
9411                 ret = hclge_vlan_offload_cfg(vport,
9412                                              vport->port_base_vlan_cfg.state,
9413                                              vlan_tag);
9414                 if (ret)
9415                         return ret;
9416         }
9417
9418         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9419 }
9420
9421 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9422                                        bool written_to_tbl)
9423 {
9424         struct hclge_vport_vlan_cfg *vlan;
9425
9426         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9427         if (!vlan)
9428                 return;
9429
9430         vlan->hd_tbl_status = written_to_tbl;
9431         vlan->vlan_id = vlan_id;
9432
9433         list_add_tail(&vlan->node, &vport->vlan_list);
9434 }
9435
9436 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9437 {
9438         struct hclge_vport_vlan_cfg *vlan, *tmp;
9439         struct hclge_dev *hdev = vport->back;
9440         int ret;
9441
9442         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9443                 if (!vlan->hd_tbl_status) {
9444                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9445                                                        vport->vport_id,
9446                                                        vlan->vlan_id, false);
9447                         if (ret) {
9448                                 dev_err(&hdev->pdev->dev,
9449                                         "restore vport vlan list failed, ret=%d\n",
9450                                         ret);
9451                                 return ret;
9452                         }
9453                 }
9454                 vlan->hd_tbl_status = true;
9455         }
9456
9457         return 0;
9458 }
9459
9460 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9461                                       bool is_write_tbl)
9462 {
9463         struct hclge_vport_vlan_cfg *vlan, *tmp;
9464         struct hclge_dev *hdev = vport->back;
9465
9466         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9467                 if (vlan->vlan_id == vlan_id) {
9468                         if (is_write_tbl && vlan->hd_tbl_status)
9469                                 hclge_set_vlan_filter_hw(hdev,
9470                                                          htons(ETH_P_8021Q),
9471                                                          vport->vport_id,
9472                                                          vlan_id,
9473                                                          true);
9474
9475                         list_del(&vlan->node);
9476                         kfree(vlan);
9477                         break;
9478                 }
9479         }
9480 }
9481
9482 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9483 {
9484         struct hclge_vport_vlan_cfg *vlan, *tmp;
9485         struct hclge_dev *hdev = vport->back;
9486
9487         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9488                 if (vlan->hd_tbl_status)
9489                         hclge_set_vlan_filter_hw(hdev,
9490                                                  htons(ETH_P_8021Q),
9491                                                  vport->vport_id,
9492                                                  vlan->vlan_id,
9493                                                  true);
9494
9495                 vlan->hd_tbl_status = false;
9496                 if (is_del_list) {
9497                         list_del(&vlan->node);
9498                         kfree(vlan);
9499                 }
9500         }
9501         clear_bit(vport->vport_id, hdev->vf_vlan_full);
9502 }
9503
9504 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9505 {
9506         struct hclge_vport_vlan_cfg *vlan, *tmp;
9507         struct hclge_vport *vport;
9508         int i;
9509
9510         for (i = 0; i < hdev->num_alloc_vport; i++) {
9511                 vport = &hdev->vport[i];
9512                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9513                         list_del(&vlan->node);
9514                         kfree(vlan);
9515                 }
9516         }
9517 }
9518
9519 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9520 {
9521         struct hclge_vport_vlan_cfg *vlan, *tmp;
9522         struct hclge_dev *hdev = vport->back;
9523         u16 vlan_proto;
9524         u16 vlan_id;
9525         u16 state;
9526         int ret;
9527
9528         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9529         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9530         state = vport->port_base_vlan_cfg.state;
9531
9532         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9533                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9534                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9535                                          vport->vport_id, vlan_id,
9536                                          false);
9537                 return;
9538         }
9539
9540         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9541                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9542                                                vport->vport_id,
9543                                                vlan->vlan_id, false);
9544                 if (ret)
9545                         break;
9546                 vlan->hd_tbl_status = true;
9547         }
9548 }
9549
9550 /* For global reset and imp reset, hardware will clear the mac table,
9551  * so we change the mac address state from ACTIVE to TO_ADD; then they
9552  * can be restored in the service task after the reset completes. Furthermore,
9553  * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
9554  * restored after reset, so just remove these mac nodes from mac_list.
9555  */
9556 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9557 {
9558         struct hclge_mac_node *mac_node, *tmp;
9559
9560         list_for_each_entry_safe(mac_node, tmp, list, node) {
9561                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
9562                         mac_node->state = HCLGE_MAC_TO_ADD;
9563                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9564                         list_del(&mac_node->node);
9565                         kfree(mac_node);
9566                 }
9567         }
9568 }
9569
9570 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9571 {
9572         spin_lock_bh(&vport->mac_list_lock);
9573
9574         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9575         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9576         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9577
9578         spin_unlock_bh(&vport->mac_list_lock);
9579 }
9580
9581 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9582 {
9583         struct hclge_vport *vport = &hdev->vport[0];
9584         struct hnae3_handle *handle = &vport->nic;
9585
9586         hclge_restore_mac_table_common(vport);
9587         hclge_restore_vport_vlan_table(vport);
9588         set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9589
9590         hclge_restore_fd_entries(handle);
9591 }
9592
9593 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9594 {
9595         struct hclge_vport *vport = hclge_get_vport(handle);
9596
9597         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9598                 vport->rxvlan_cfg.strip_tag1_en = false;
9599                 vport->rxvlan_cfg.strip_tag2_en = enable;
9600                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9601         } else {
9602                 vport->rxvlan_cfg.strip_tag1_en = enable;
9603                 vport->rxvlan_cfg.strip_tag2_en = true;
9604                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9605         }
9606
9607         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9608         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9609         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9610         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9611
9612         return hclge_set_vlan_rx_offload_cfg(vport);
9613 }
9614
9615 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9616                                             u16 port_base_vlan_state,
9617                                             struct hclge_vlan_info *new_info,
9618                                             struct hclge_vlan_info *old_info)
9619 {
9620         struct hclge_dev *hdev = vport->back;
9621         int ret;
9622
9623         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9624                 hclge_rm_vport_all_vlan_table(vport, false);
9625                 return hclge_set_vlan_filter_hw(hdev,
9626                                                  htons(new_info->vlan_proto),
9627                                                  vport->vport_id,
9628                                                  new_info->vlan_tag,
9629                                                  false);
9630         }
9631
9632         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9633                                        vport->vport_id, old_info->vlan_tag,
9634                                        true);
9635         if (ret)
9636                 return ret;
9637
9638         return hclge_add_vport_all_vlan_table(vport);
9639 }
9640
9641 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9642                                     struct hclge_vlan_info *vlan_info)
9643 {
9644         struct hnae3_handle *nic = &vport->nic;
9645         struct hclge_vlan_info *old_vlan_info;
9646         struct hclge_dev *hdev = vport->back;
9647         int ret;
9648
9649         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9650
9651         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9652         if (ret)
9653                 return ret;
9654
9655         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9656                 /* add new VLAN tag */
9657                 ret = hclge_set_vlan_filter_hw(hdev,
9658                                                htons(vlan_info->vlan_proto),
9659                                                vport->vport_id,
9660                                                vlan_info->vlan_tag,
9661                                                false);
9662                 if (ret)
9663                         return ret;
9664
9665                 /* remove old VLAN tag */
9666                 ret = hclge_set_vlan_filter_hw(hdev,
9667                                                htons(old_vlan_info->vlan_proto),
9668                                                vport->vport_id,
9669                                                old_vlan_info->vlan_tag,
9670                                                true);
9671                 if (ret)
9672                         return ret;
9673
9674                 goto update;
9675         }
9676
9677         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9678                                                old_vlan_info);
9679         if (ret)
9680                 return ret;
9681
9682         /* update state only when disable/enable port based VLAN */
9683         vport->port_base_vlan_cfg.state = state;
9684         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9685                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9686         else
9687                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9688
9689 update:
9690         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9691         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9692         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9693
9694         return 0;
9695 }
9696
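/* Work out how the port based VLAN configuration changes for the requested
 * VLAN: enable, disable, modify or no change.
 */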
9697 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9698                                           enum hnae3_port_base_vlan_state state,
9699                                           u16 vlan)
9700 {
9701         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9702                 if (!vlan)
9703                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9704                 else
9705                         return HNAE3_PORT_BASE_VLAN_ENABLE;
9706         } else {
9707                 if (!vlan)
9708                         return HNAE3_PORT_BASE_VLAN_DISABLE;
9709                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9710                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9711                 else
9712                         return HNAE3_PORT_BASE_VLAN_MODIFY;
9713         }
9714 }
9715
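/* Configure a port based VLAN for a VF: validate the vfid, VLAN id, qos and
 * protocol, update the hardware filter, and notify the VF when it needs to
 * know about the new state (pre-V3 devices with an alive VF only).
 */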
9716 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9717                                     u16 vlan, u8 qos, __be16 proto)
9718 {
9719         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9720         struct hclge_vport *vport = hclge_get_vport(handle);
9721         struct hclge_dev *hdev = vport->back;
9722         struct hclge_vlan_info vlan_info;
9723         u16 state;
9724         int ret;
9725
9726         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9727                 return -EOPNOTSUPP;
9728
9729         vport = hclge_get_vf_vport(hdev, vfid);
9730         if (!vport)
9731                 return -EINVAL;
9732
9733         /* qos is a 3-bit value, so it cannot be bigger than 7 */
9734         if (vlan > VLAN_N_VID - 1 || qos > 7)
9735                 return -EINVAL;
9736         if (proto != htons(ETH_P_8021Q))
9737                 return -EPROTONOSUPPORT;
9738
9739         state = hclge_get_port_base_vlan_state(vport,
9740                                                vport->port_base_vlan_cfg.state,
9741                                                vlan);
9742         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9743                 return 0;
9744
9745         vlan_info.vlan_tag = vlan;
9746         vlan_info.qos = qos;
9747         vlan_info.vlan_proto = ntohs(proto);
9748
9749         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9750         if (ret) {
9751                 dev_err(&hdev->pdev->dev,
9752                         "failed to update port base vlan for vf %d, ret = %d\n",
9753                         vfid, ret);
9754                 return ret;
9755         }
9756
9757         /* for DEVICE_VERSION_V3, the VF doesn't need to know about the port
9758          * based VLAN state.
9759          */
9760         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9761             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9762                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9763                                                   vport->vport_id, state,
9764                                                   vlan, qos,
9765                                                   ntohs(proto));
9766
9767         return 0;
9768 }
9769
9770 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9771 {
9772         struct hclge_vlan_info *vlan_info;
9773         struct hclge_vport *vport;
9774         int ret;
9775         int vf;
9776
9777         /* clear the port based VLAN for all VFs */
9778         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9779                 vport = &hdev->vport[vf];
9780                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9781
9782                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9783                                                vport->vport_id,
9784                                                vlan_info->vlan_tag, true);
9785                 if (ret)
9786                         dev_err(&hdev->pdev->dev,
9787                                 "failed to clear vf vlan for vf%d, ret = %d\n",
9788                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9789         }
9790 }
9791
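/* Add or remove a VLAN filter entry for the vport. When port based VLAN is
 * enabled, only the vport VLAN list is updated and the hardware table is
 * left untouched until port based VLAN is disabled again.
 */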
9792 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9793                           u16 vlan_id, bool is_kill)
9794 {
9795         struct hclge_vport *vport = hclge_get_vport(handle);
9796         struct hclge_dev *hdev = vport->back;
9797         bool writen_to_tbl = false;
9798         int ret = 0;
9799
9800         /* When the device is resetting or the reset has failed, the firmware
9801          * is unable to handle the mailbox. Just record the VLAN id and remove
9802          * it after the reset finishes.
9803          */
9804         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9805              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9806                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9807                 return -EBUSY;
9808         }
9809
9810         /* when port based VLAN is enabled, we use the port based VLAN as the
9811          * VLAN filter entry. In this case, we don't update the VLAN filter
9812          * table when the user adds a new VLAN or removes an existing one, we
9813          * only update the vport VLAN list. The VLAN ids in the list are not
9814          * written to the VLAN filter table until port based VLAN is disabled.
9815          */
9816         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9817                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9818                                                vlan_id, is_kill);
9819                 writen_to_tbl = true;
9820         }
9821
9822         if (!ret) {
9823                 if (is_kill)
9824                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9825                 else
9826                         hclge_add_vport_vlan_table(vport, vlan_id,
9827                                                    writen_to_tbl);
9828         } else if (is_kill) {
9829                 /* when removing the hw VLAN filter failed, record the VLAN id
9830                  * and try to remove it from hw later, to stay consistent with
9831                  * the stack
9832                  */
9833                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
9834         }
9835         return ret;
9836 }
9837
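/* Retry the VLAN deletions recorded in vlan_del_fail_bmap, i.e. the ones
 * that could not be removed from hardware earlier (e.g. during a reset).
 */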
9838 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9839 {
9840 #define HCLGE_MAX_SYNC_COUNT    60
9841
9842         int i, ret, sync_cnt = 0;
9843         u16 vlan_id;
9844
9845         /* start from vport 1 for PF is always alive */
9846         for (i = 0; i < hdev->num_alloc_vport; i++) {
9847                 struct hclge_vport *vport = &hdev->vport[i];
9848
9849                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9850                                          VLAN_N_VID);
9851                 while (vlan_id != VLAN_N_VID) {
9852                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9853                                                        vport->vport_id, vlan_id,
9854                                                        true);
9855                         if (ret && ret != -EINVAL)
9856                                 return;
9857
9858                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9859                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
9860
9861                         sync_cnt++;
9862                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9863                                 return;
9864
9865                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9866                                                  VLAN_N_VID);
9867                 }
9868         }
9869 }
9870
9871 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9872 {
9873         struct hclge_config_max_frm_size_cmd *req;
9874         struct hclge_desc desc;
9875
9876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9877
9878         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9879         req->max_frm_size = cpu_to_le16(new_mps);
9880         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9881
9882         return hclge_cmd_send(&hdev->hw, &desc, 1);
9883 }
9884
9885 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9886 {
9887         struct hclge_vport *vport = hclge_get_vport(handle);
9888
9889         return hclge_set_vport_mtu(vport, new_mtu);
9890 }
9891
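/* Change the MPS (max packet size) of a vport. A VF's value only has to fit
 * within the PF's mps; changing the PF's value also reprograms the MAC and
 * reallocates the packet buffers.
 */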
9892 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9893 {
9894         struct hclge_dev *hdev = vport->back;
9895         int i, max_frm_size, ret;
9896
9897         /* HW supports 2-layer VLAN */
9898         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9899         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9900             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9901                 return -EINVAL;
9902
9903         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9904         mutex_lock(&hdev->vport_lock);
9905         /* VF's mps must fit within hdev->mps */
9906         if (vport->vport_id && max_frm_size > hdev->mps) {
9907                 mutex_unlock(&hdev->vport_lock);
9908                 return -EINVAL;
9909         } else if (vport->vport_id) {
9910                 vport->mps = max_frm_size;
9911                 mutex_unlock(&hdev->vport_lock);
9912                 return 0;
9913         }
9914
9915         /* PF's mps must not be less than any VF's mps */
9916         for (i = 1; i < hdev->num_alloc_vport; i++)
9917                 if (max_frm_size < hdev->vport[i].mps) {
9918                         mutex_unlock(&hdev->vport_lock);
9919                         return -EINVAL;
9920                 }
9921
9922         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9923
9924         ret = hclge_set_mac_mtu(hdev, max_frm_size);
9925         if (ret) {
9926                 dev_err(&hdev->pdev->dev,
9927                         "Change mtu fail, ret =%d\n", ret);
9928                 goto out;
9929         }
9930
9931         hdev->mps = max_frm_size;
9932         vport->mps = max_frm_size;
9933
9934         ret = hclge_buffer_alloc(hdev);
9935         if (ret)
9936                 dev_err(&hdev->pdev->dev,
9937                         "Allocate buffer fail, ret =%d\n", ret);
9938
9939 out:
9940         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9941         mutex_unlock(&hdev->vport_lock);
9942         return ret;
9943 }
9944
9945 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9946                                     bool enable)
9947 {
9948         struct hclge_reset_tqp_queue_cmd *req;
9949         struct hclge_desc desc;
9950         int ret;
9951
9952         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9953
9954         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9955         req->tqp_id = cpu_to_le16(queue_id);
9956         if (enable)
9957                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9958
9959         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9960         if (ret) {
9961                 dev_err(&hdev->pdev->dev,
9962                         "Send tqp reset cmd error, status =%d\n", ret);
9963                 return ret;
9964         }
9965
9966         return 0;
9967 }
9968
9969 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9970 {
9971         struct hclge_reset_tqp_queue_cmd *req;
9972         struct hclge_desc desc;
9973         int ret;
9974
9975         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9976
9977         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9978         req->tqp_id = cpu_to_le16(queue_id);
9979
9980         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9981         if (ret) {
9982                 dev_err(&hdev->pdev->dev,
9983                         "Get reset status error, status =%d\n", ret);
9984                 return ret;
9985         }
9986
9987         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9988 }
9989
9990 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9991 {
9992         struct hnae3_queue *queue;
9993         struct hclge_tqp *tqp;
9994
9995         queue = handle->kinfo.tqp[queue_id];
9996         tqp = container_of(queue, struct hclge_tqp, q);
9997
9998         return tqp->index;
9999 }
10000
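/* Reset a single TQP: disable it, request the reset, poll the reset status,
 * then deassert the reset.
 */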
10001 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
10002 {
10003         struct hclge_vport *vport = hclge_get_vport(handle);
10004         struct hclge_dev *hdev = vport->back;
10005         int reset_try_times = 0;
10006         int reset_status;
10007         u16 queue_gid;
10008         int ret;
10009
10010         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
10011
10012         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
10013         if (ret) {
10014                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
10015                 return ret;
10016         }
10017
10018         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10019         if (ret) {
10020                 dev_err(&hdev->pdev->dev,
10021                         "Send reset tqp cmd fail, ret = %d\n", ret);
10022                 return ret;
10023         }
10024
10025         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10026                 reset_status = hclge_get_reset_status(hdev, queue_gid);
10027                 if (reset_status)
10028                         break;
10029
10030                 /* Wait for tqp hw reset */
10031                 usleep_range(1000, 1200);
10032         }
10033
10034         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10035                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
10036                 return ret;
10037         }
10038
10039         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10040         if (ret)
10041                 dev_err(&hdev->pdev->dev,
10042                         "Deassert the soft reset fail, ret = %d\n", ret);
10043
10044         return ret;
10045 }
10046
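/* Reset one of a VF's TQPs: request the reset, poll the reset status and
 * deassert it; failures are only reported as warnings.
 */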
10047 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
10048 {
10049         struct hnae3_handle *handle = &vport->nic;
10050         struct hclge_dev *hdev = vport->back;
10051         int reset_try_times = 0;
10052         int reset_status;
10053         u16 queue_gid;
10054         int ret;
10055
10056         if (queue_id >= handle->kinfo.num_tqps) {
10057                 dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
10058                          queue_id);
10059                 return;
10060         }
10061
10062         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
10063
10064         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
10065         if (ret) {
10066                 dev_warn(&hdev->pdev->dev,
10067                          "Send reset tqp cmd fail, ret = %d\n", ret);
10068                 return;
10069         }
10070
10071         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10072                 reset_status = hclge_get_reset_status(hdev, queue_gid);
10073                 if (reset_status)
10074                         break;
10075
10076                 /* Wait for tqp hw reset */
10077                 usleep_range(1000, 1200);
10078         }
10079
10080         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10081                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
10082                 return;
10083         }
10084
10085         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
10086         if (ret)
10087                 dev_warn(&hdev->pdev->dev,
10088                          "Deassert the soft reset fail, ret = %d\n", ret);
10089 }
10090
10091 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10092 {
10093         struct hclge_vport *vport = hclge_get_vport(handle);
10094         struct hclge_dev *hdev = vport->back;
10095
10096         return hdev->fw_version;
10097 }
10098
10099 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10100 {
10101         struct phy_device *phydev = hdev->hw.mac.phydev;
10102
10103         if (!phydev)
10104                 return;
10105
10106         phy_set_asym_pause(phydev, rx_en, tx_en);
10107 }
10108
10109 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10110 {
10111         int ret;
10112
10113         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10114                 return 0;
10115
10116         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10117         if (ret)
10118                 dev_err(&hdev->pdev->dev,
10119                         "configure pauseparam error, ret = %d.\n", ret);
10120
10121         return ret;
10122 }
10123
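/* Resolve the pause configuration from the local and link partner
 * advertisements once autoneg has completed on the PHY, then apply it to
 * the MAC.
 */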
10124 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10125 {
10126         struct phy_device *phydev = hdev->hw.mac.phydev;
10127         u16 remote_advertising = 0;
10128         u16 local_advertising;
10129         u32 rx_pause, tx_pause;
10130         u8 flowctl;
10131
10132         if (!phydev->link || !phydev->autoneg)
10133                 return 0;
10134
10135         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10136
10137         if (phydev->pause)
10138                 remote_advertising = LPA_PAUSE_CAP;
10139
10140         if (phydev->asym_pause)
10141                 remote_advertising |= LPA_PAUSE_ASYM;
10142
10143         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10144                                            remote_advertising);
10145         tx_pause = flowctl & FLOW_CTRL_TX;
10146         rx_pause = flowctl & FLOW_CTRL_RX;
10147
10148         if (phydev->duplex == HCLGE_MAC_HALF) {
10149                 tx_pause = 0;
10150                 rx_pause = 0;
10151         }
10152
10153         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10154 }
10155
10156 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10157                                  u32 *rx_en, u32 *tx_en)
10158 {
10159         struct hclge_vport *vport = hclge_get_vport(handle);
10160         struct hclge_dev *hdev = vport->back;
10161         struct phy_device *phydev = hdev->hw.mac.phydev;
10162
10163         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
10164
10165         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10166                 *rx_en = 0;
10167                 *tx_en = 0;
10168                 return;
10169         }
10170
10171         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10172                 *rx_en = 1;
10173                 *tx_en = 0;
10174         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10175                 *tx_en = 1;
10176                 *rx_en = 0;
10177         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10178                 *rx_en = 1;
10179                 *tx_en = 1;
10180         } else {
10181                 *rx_en = 0;
10182                 *tx_en = 0;
10183         }
10184 }
10185
10186 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10187                                          u32 rx_en, u32 tx_en)
10188 {
10189         if (rx_en && tx_en)
10190                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10191         else if (rx_en && !tx_en)
10192                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10193         else if (!rx_en && tx_en)
10194                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10195         else
10196                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10197
10198         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10199 }
10200
10201 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10202                                 u32 rx_en, u32 tx_en)
10203 {
10204         struct hclge_vport *vport = hclge_get_vport(handle);
10205         struct hclge_dev *hdev = vport->back;
10206         struct phy_device *phydev = hdev->hw.mac.phydev;
10207         u32 fc_autoneg;
10208
10209         if (phydev) {
10210                 fc_autoneg = hclge_get_autoneg(handle);
10211                 if (auto_neg != fc_autoneg) {
10212                         dev_info(&hdev->pdev->dev,
10213                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10214                         return -EOPNOTSUPP;
10215                 }
10216         }
10217
10218         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10219                 dev_info(&hdev->pdev->dev,
10220                          "Priority flow control enabled. Cannot set link flow control.\n");
10221                 return -EOPNOTSUPP;
10222         }
10223
10224         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10225
10226         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10227
10228         if (!auto_neg)
10229                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10230
10231         if (phydev)
10232                 return phy_start_aneg(phydev);
10233
10234         return -EOPNOTSUPP;
10235 }
10236
10237 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10238                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10239 {
10240         struct hclge_vport *vport = hclge_get_vport(handle);
10241         struct hclge_dev *hdev = vport->back;
10242
10243         if (speed)
10244                 *speed = hdev->hw.mac.speed;
10245         if (duplex)
10246                 *duplex = hdev->hw.mac.duplex;
10247         if (auto_neg)
10248                 *auto_neg = hdev->hw.mac.autoneg;
10249 }
10250
10251 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10252                                  u8 *module_type)
10253 {
10254         struct hclge_vport *vport = hclge_get_vport(handle);
10255         struct hclge_dev *hdev = vport->back;
10256
10257         /* When the nic is down, the service task is not running and does not
10258          * update the port information periodically. Query the port information
10259          * before returning the media type to ensure it is correct.
10260          */
10261         hclge_update_port_info(hdev);
10262
10263         if (media_type)
10264                 *media_type = hdev->hw.mac.media_type;
10265
10266         if (module_type)
10267                 *module_type = hdev->hw.mac.module_type;
10268 }
10269
10270 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10271                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10272 {
10273         struct hclge_vport *vport = hclge_get_vport(handle);
10274         struct hclge_dev *hdev = vport->back;
10275         struct phy_device *phydev = hdev->hw.mac.phydev;
10276         int mdix_ctrl, mdix, is_resolved;
10277         unsigned int retval;
10278
10279         if (!phydev) {
10280                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10281                 *tp_mdix = ETH_TP_MDI_INVALID;
10282                 return;
10283         }
10284
10285         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10286
10287         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10288         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10289                                     HCLGE_PHY_MDIX_CTRL_S);
10290
10291         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10292         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10293         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10294
10295         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10296
10297         switch (mdix_ctrl) {
10298         case 0x0:
10299                 *tp_mdix_ctrl = ETH_TP_MDI;
10300                 break;
10301         case 0x1:
10302                 *tp_mdix_ctrl = ETH_TP_MDI_X;
10303                 break;
10304         case 0x3:
10305                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10306                 break;
10307         default:
10308                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10309                 break;
10310         }
10311
10312         if (!is_resolved)
10313                 *tp_mdix = ETH_TP_MDI_INVALID;
10314         else if (mdix)
10315                 *tp_mdix = ETH_TP_MDI_X;
10316         else
10317                 *tp_mdix = ETH_TP_MDI;
10318 }
10319
10320 static void hclge_info_show(struct hclge_dev *hdev)
10321 {
10322         struct device *dev = &hdev->pdev->dev;
10323
10324         dev_info(dev, "PF info begin:\n");
10325
10326         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10327         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10328         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10329         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10330         dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
10331         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10332         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10333         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10334         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10335         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10336         dev_info(dev, "This is %s PF\n",
10337                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10338         dev_info(dev, "DCB %s\n",
10339                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10340         dev_info(dev, "MQPRIO %s\n",
10341                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10342
10343         dev_info(dev, "PF info end.\n");
10344 }
10345
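/* Initialize the NIC client instance for a vport and enable the NIC hw
 * error interrupts. If a reset starts while this runs, the instance is
 * uninitialized again and -EBUSY is returned.
 */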
10346 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10347                                           struct hclge_vport *vport)
10348 {
10349         struct hnae3_client *client = vport->nic.client;
10350         struct hclge_dev *hdev = ae_dev->priv;
10351         int rst_cnt = hdev->rst_stats.reset_cnt;
10352         int ret;
10353
10354         ret = client->ops->init_instance(&vport->nic);
10355         if (ret)
10356                 return ret;
10357
10358         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10359         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10360             rst_cnt != hdev->rst_stats.reset_cnt) {
10361                 ret = -EBUSY;
10362                 goto init_nic_err;
10363         }
10364
10365         /* Enable nic hw error interrupts */
10366         ret = hclge_config_nic_hw_error(hdev, true);
10367         if (ret) {
10368                 dev_err(&ae_dev->pdev->dev,
10369                         "fail(%d) to enable hw error interrupts\n", ret);
10370                 goto init_nic_err;
10371         }
10372
10373         hnae3_set_client_init_flag(client, ae_dev, 1);
10374
10375         if (netif_msg_drv(&hdev->vport->nic))
10376                 hclge_info_show(hdev);
10377
10378         return ret;
10379
10380 init_nic_err:
10381         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10382         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10383                 msleep(HCLGE_WAIT_RESET_DONE);
10384
10385         client->ops->uninit_instance(&vport->nic, 0);
10386
10387         return ret;
10388 }
10389
10390 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10391                                            struct hclge_vport *vport)
10392 {
10393         struct hclge_dev *hdev = ae_dev->priv;
10394         struct hnae3_client *client;
10395         int rst_cnt;
10396         int ret;
10397
10398         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10399             !hdev->nic_client)
10400                 return 0;
10401
10402         client = hdev->roce_client;
10403         ret = hclge_init_roce_base_info(vport);
10404         if (ret)
10405                 return ret;
10406
10407         rst_cnt = hdev->rst_stats.reset_cnt;
10408         ret = client->ops->init_instance(&vport->roce);
10409         if (ret)
10410                 return ret;
10411
10412         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10413         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10414             rst_cnt != hdev->rst_stats.reset_cnt) {
10415                 ret = -EBUSY;
10416                 goto init_roce_err;
10417         }
10418
10419         /* Enable roce ras interrupts */
10420         ret = hclge_config_rocee_ras_interrupt(hdev, true);
10421         if (ret) {
10422                 dev_err(&ae_dev->pdev->dev,
10423                         "fail(%d) to enable roce ras interrupts\n", ret);
10424                 goto init_roce_err;
10425         }
10426
10427         hnae3_set_client_init_flag(client, ae_dev, 1);
10428
10429         return 0;
10430
10431 init_roce_err:
10432         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10433         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10434                 msleep(HCLGE_WAIT_RESET_DONE);
10435
10436         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10437
10438         return ret;
10439 }
10440
10441 static int hclge_init_client_instance(struct hnae3_client *client,
10442                                       struct hnae3_ae_dev *ae_dev)
10443 {
10444         struct hclge_dev *hdev = ae_dev->priv;
10445         struct hclge_vport *vport;
10446         int i, ret;
10447
10448         for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
10449                 vport = &hdev->vport[i];
10450
10451                 switch (client->type) {
10452                 case HNAE3_CLIENT_KNIC:
10453                         hdev->nic_client = client;
10454                         vport->nic.client = client;
10455                         ret = hclge_init_nic_client_instance(ae_dev, vport);
10456                         if (ret)
10457                                 goto clear_nic;
10458
10459                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10460                         if (ret)
10461                                 goto clear_roce;
10462
10463                         break;
10464                 case HNAE3_CLIENT_ROCE:
10465                         if (hnae3_dev_roce_supported(hdev)) {
10466                                 hdev->roce_client = client;
10467                                 vport->roce.client = client;
10468                         }
10469
10470                         ret = hclge_init_roce_client_instance(ae_dev, vport);
10471                         if (ret)
10472                                 goto clear_roce;
10473
10474                         break;
10475                 default:
10476                         return -EINVAL;
10477                 }
10478         }
10479
10480         return 0;
10481
10482 clear_nic:
10483         hdev->nic_client = NULL;
10484         vport->nic.client = NULL;
10485         return ret;
10486 clear_roce:
10487         hdev->roce_client = NULL;
10488         vport->roce.client = NULL;
10489         return ret;
10490 }
10491
10492 static void hclge_uninit_client_instance(struct hnae3_client *client,
10493                                          struct hnae3_ae_dev *ae_dev)
10494 {
10495         struct hclge_dev *hdev = ae_dev->priv;
10496         struct hclge_vport *vport;
10497         int i;
10498
10499         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10500                 vport = &hdev->vport[i];
10501                 if (hdev->roce_client) {
10502                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10503                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10504                                 msleep(HCLGE_WAIT_RESET_DONE);
10505
10506                         hdev->roce_client->ops->uninit_instance(&vport->roce,
10507                                                                 0);
10508                         hdev->roce_client = NULL;
10509                         vport->roce.client = NULL;
10510                 }
10511                 if (client->type == HNAE3_CLIENT_ROCE)
10512                         return;
10513                 if (hdev->nic_client && client->ops->uninit_instance) {
10514                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10515                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10516                                 msleep(HCLGE_WAIT_RESET_DONE);
10517
10518                         client->ops->uninit_instance(&vport->nic, 0);
10519                         hdev->nic_client = NULL;
10520                         vport->nic.client = NULL;
10521                 }
10522         }
10523 }
10524
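/* Map the device memory BAR (BAR 4) when the device exposes one; devices
 * without it skip the mapping.
 */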
10525 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10526 {
10527 #define HCLGE_MEM_BAR           4
10528
10529         struct pci_dev *pdev = hdev->pdev;
10530         struct hclge_hw *hw = &hdev->hw;
10531
10532         /* for devices that do not have device memory, return directly */
10533         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10534                 return 0;
10535
10536         hw->mem_base = devm_ioremap_wc(&pdev->dev,
10537                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
10538                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
10539         if (!hw->mem_base) {
10540                 dev_err(&pdev->dev, "failed to map device memory\n");
10541                 return -EFAULT;
10542         }
10543
10544         return 0;
10545 }
10546
10547 static int hclge_pci_init(struct hclge_dev *hdev)
10548 {
10549         struct pci_dev *pdev = hdev->pdev;
10550         struct hclge_hw *hw;
10551         int ret;
10552
10553         ret = pci_enable_device(pdev);
10554         if (ret) {
10555                 dev_err(&pdev->dev, "failed to enable PCI device\n");
10556                 return ret;
10557         }
10558
10559         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10560         if (ret) {
10561                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10562                 if (ret) {
10563                         dev_err(&pdev->dev,
10564                                 "can't set consistent PCI DMA");
10565                         goto err_disable_device;
10566                 }
10567                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10568         }
10569
10570         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10571         if (ret) {
10572                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10573                 goto err_disable_device;
10574         }
10575
10576         pci_set_master(pdev);
10577         hw = &hdev->hw;
10578         hw->io_base = pcim_iomap(pdev, 2, 0);
10579         if (!hw->io_base) {
10580                 dev_err(&pdev->dev, "Can't map configuration register space\n");
10581                 ret = -ENOMEM;
10582                 goto err_clr_master;
10583         }
10584
10585         ret = hclge_dev_mem_map(hdev);
10586         if (ret)
10587                 goto err_unmap_io_base;
10588
10589         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10590
10591         return 0;
10592
10593 err_unmap_io_base:
10594         pcim_iounmap(pdev, hdev->hw.io_base);
10595 err_clr_master:
10596         pci_clear_master(pdev);
10597         pci_release_regions(pdev);
10598 err_disable_device:
10599         pci_disable_device(pdev);
10600
10601         return ret;
10602 }
10603
10604 static void hclge_pci_uninit(struct hclge_dev *hdev)
10605 {
10606         struct pci_dev *pdev = hdev->pdev;
10607
10608         if (hdev->hw.mem_base)
10609                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10610
10611         pcim_iounmap(pdev, hdev->hw.io_base);
10612         pci_free_irq_vectors(pdev);
10613         pci_clear_master(pdev);
10614         pci_release_mem_regions(pdev);
10615         pci_disable_device(pdev);
10616 }
10617
10618 static void hclge_state_init(struct hclge_dev *hdev)
10619 {
10620         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10621         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10622         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10623         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10624         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10625         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10626         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10627 }
10628
10629 static void hclge_state_uninit(struct hclge_dev *hdev)
10630 {
10631         set_bit(HCLGE_STATE_DOWN, &hdev->state);
10632         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10633
10634         if (hdev->reset_timer.function)
10635                 del_timer_sync(&hdev->reset_timer);
10636         if (hdev->service_task.work.func)
10637                 cancel_delayed_work_sync(&hdev->service_task);
10638 }
10639
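/* Prepare for an FLR: take the reset semaphore and run the reset prepare
 * sequence (retried a few times on failure), then mask the misc vector and
 * disable the command queue until the FLR is done.
 */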
10640 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10641 {
10642 #define HCLGE_FLR_RETRY_WAIT_MS 500
10643 #define HCLGE_FLR_RETRY_CNT     5
10644
10645         struct hclge_dev *hdev = ae_dev->priv;
10646         int retry_cnt = 0;
10647         int ret;
10648
10649 retry:
10650         down(&hdev->reset_sem);
10651         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10652         hdev->reset_type = HNAE3_FLR_RESET;
10653         ret = hclge_reset_prepare(hdev);
10654         if (ret || hdev->reset_pending) {
10655                 dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10656                         ret);
10657                 if (hdev->reset_pending ||
10658                     retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10659                         dev_err(&hdev->pdev->dev,
10660                                 "reset_pending:0x%lx, retry_cnt:%d\n",
10661                                 hdev->reset_pending, retry_cnt);
10662                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10663                         up(&hdev->reset_sem);
10664                         msleep(HCLGE_FLR_RETRY_WAIT_MS);
10665                         goto retry;
10666                 }
10667         }
10668
10669         /* disable the misc vector before the FLR is done */
10670         hclge_enable_vector(&hdev->misc_vector, false);
10671         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10672         hdev->rst_stats.flr_rst_cnt++;
10673 }
10674
10675 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10676 {
10677         struct hclge_dev *hdev = ae_dev->priv;
10678         int ret;
10679
10680         hclge_enable_vector(&hdev->misc_vector, true);
10681
10682         ret = hclge_reset_rebuild(hdev);
10683         if (ret)
10684                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10685
10686         hdev->reset_type = HNAE3_NONE_RESET;
10687         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10688         up(&hdev->reset_sem);
10689 }
10690
10691 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10692 {
10693         u16 i;
10694
10695         for (i = 0; i < hdev->num_alloc_vport; i++) {
10696                 struct hclge_vport *vport = &hdev->vport[i];
10697                 int ret;
10698
10699                  /* Send cmd to clear VF's FUNC_RST_ING */
10700                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10701                 if (ret)
10702                         dev_warn(&hdev->pdev->dev,
10703                                  "clear vf(%u) rst failed %d!\n",
10704                                  vport->vport_id, ret);
10705         }
10706 }
10707
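/* Main PF initialization path: PCI and command queue setup, capability and
 * dev spec queries, MSI-X and TQP/vport allocation, MAC, VLAN, TM, RSS and
 * FD configuration, and finally the service task and misc interrupt.
 */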
10708 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10709 {
10710         struct pci_dev *pdev = ae_dev->pdev;
10711         struct hclge_dev *hdev;
10712         int ret;
10713
10714         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10715         if (!hdev)
10716                 return -ENOMEM;
10717
10718         hdev->pdev = pdev;
10719         hdev->ae_dev = ae_dev;
10720         hdev->reset_type = HNAE3_NONE_RESET;
10721         hdev->reset_level = HNAE3_FUNC_RESET;
10722         ae_dev->priv = hdev;
10723
10724         /* HW supports 2-layer VLAN */
10725         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10726
10727         mutex_init(&hdev->vport_lock);
10728         spin_lock_init(&hdev->fd_rule_lock);
10729         sema_init(&hdev->reset_sem, 1);
10730
10731         ret = hclge_pci_init(hdev);
10732         if (ret)
10733                 goto out;
10734
10735         /* Initialize the firmware command queue */
10736         ret = hclge_cmd_queue_init(hdev);
10737         if (ret)
10738                 goto err_pci_uninit;
10739
10740         /* Initialize the firmware command interface */
10741         ret = hclge_cmd_init(hdev);
10742         if (ret)
10743                 goto err_cmd_uninit;
10744
10745         ret = hclge_get_cap(hdev);
10746         if (ret)
10747                 goto err_cmd_uninit;
10748
10749         ret = hclge_query_dev_specs(hdev);
10750         if (ret) {
10751                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10752                         ret);
10753                 goto err_cmd_uninit;
10754         }
10755
10756         ret = hclge_configure(hdev);
10757         if (ret) {
10758                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10759                 goto err_cmd_uninit;
10760         }
10761
10762         ret = hclge_init_msi(hdev);
10763         if (ret) {
10764                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10765                 goto err_cmd_uninit;
10766         }
10767
10768         ret = hclge_misc_irq_init(hdev);
10769         if (ret)
10770                 goto err_msi_uninit;
10771
10772         ret = hclge_alloc_tqps(hdev);
10773         if (ret) {
10774                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10775                 goto err_msi_irq_uninit;
10776         }
10777
10778         ret = hclge_alloc_vport(hdev);
10779         if (ret)
10780                 goto err_msi_irq_uninit;
10781
10782         ret = hclge_map_tqp(hdev);
10783         if (ret)
10784                 goto err_msi_irq_uninit;
10785
10786         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
10787             !hnae3_dev_phy_imp_supported(hdev)) {
10788                 ret = hclge_mac_mdio_config(hdev);
10789                 if (ret)
10790                         goto err_msi_irq_uninit;
10791         }
10792
10793         ret = hclge_init_umv_space(hdev);
10794         if (ret)
10795                 goto err_mdiobus_unreg;
10796
10797         ret = hclge_mac_init(hdev);
10798         if (ret) {
10799                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10800                 goto err_mdiobus_unreg;
10801         }
10802
10803         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10804         if (ret) {
10805                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10806                 goto err_mdiobus_unreg;
10807         }
10808
10809         ret = hclge_config_gro(hdev, true);
10810         if (ret)
10811                 goto err_mdiobus_unreg;
10812
10813         ret = hclge_init_vlan_config(hdev);
10814         if (ret) {
10815                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10816                 goto err_mdiobus_unreg;
10817         }
10818
10819         ret = hclge_tm_schd_init(hdev);
10820         if (ret) {
10821                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10822                 goto err_mdiobus_unreg;
10823         }
10824
10825         ret = hclge_rss_init_cfg(hdev);
10826         if (ret) {
10827                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10828                 goto err_mdiobus_unreg;
10829         }
10830
10831         ret = hclge_rss_init_hw(hdev);
10832         if (ret) {
10833                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10834                 goto err_mdiobus_unreg;
10835         }
10836
10837         ret = init_mgr_tbl(hdev);
10838         if (ret) {
10839                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10840                 goto err_mdiobus_unreg;
10841         }
10842
10843         ret = hclge_init_fd_config(hdev);
10844         if (ret) {
10845                 dev_err(&pdev->dev,
10846                         "fd table init fail, ret=%d\n", ret);
10847                 goto err_mdiobus_unreg;
10848         }
10849
10850         INIT_KFIFO(hdev->mac_tnl_log);
10851
10852         hclge_dcb_ops_set(hdev);
10853
10854         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10855         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10856
10857         /* Set up affinity after the service timer setup because add_timer_on
10858          * is called in the affinity notify.
10859          */
10860         hclge_misc_affinity_setup(hdev);
10861
10862         hclge_clear_all_event_cause(hdev);
10863         hclge_clear_resetting_state(hdev);
10864
10865         /* Log and clear the hw errors that have already occurred */
10866         hclge_handle_all_hns_hw_errors(ae_dev);
10867
10868         /* request a delayed reset for error recovery because an immediate global
10869          * reset on a PF would affect the pending initialization of other PFs
10870          */
10871         if (ae_dev->hw_err_reset_req) {
10872                 enum hnae3_reset_type reset_level;
10873
10874                 reset_level = hclge_get_reset_level(ae_dev,
10875                                                     &ae_dev->hw_err_reset_req);
10876                 hclge_set_def_reset_request(ae_dev, reset_level);
10877                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10878         }
10879
10880         /* Enable MISC vector(vector0) */
10881         hclge_enable_vector(&hdev->misc_vector, true);
10882
10883         hclge_state_init(hdev);
10884         hdev->last_reset_time = jiffies;
10885
10886         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10887                  HCLGE_DRIVER_NAME);
10888
10889         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10890
10891         return 0;
10892
10893 err_mdiobus_unreg:
10894         if (hdev->hw.mac.phydev)
10895                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
10896 err_msi_irq_uninit:
10897         hclge_misc_irq_uninit(hdev);
10898 err_msi_uninit:
10899         pci_free_irq_vectors(pdev);
10900 err_cmd_uninit:
10901         hclge_cmd_uninit(hdev);
10902 err_pci_uninit:
10903         pcim_iounmap(pdev, hdev->hw.io_base);
10904         pci_clear_master(pdev);
10905         pci_release_regions(pdev);
10906         pci_disable_device(pdev);
10907 out:
10908         mutex_destroy(&hdev->vport_lock);
10909         return ret;
10910 }
10911
10912 static void hclge_stats_clear(struct hclge_dev *hdev)
10913 {
10914         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10915 }
10916
10917 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10918 {
10919         return hclge_config_switch_param(hdev, vf, enable,
10920                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
10921 }
10922
10923 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10924 {
10925         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10926                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
10927                                           enable, vf);
10928 }
10929
10930 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10931 {
10932         int ret;
10933
10934         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10935         if (ret) {
10936                 dev_err(&hdev->pdev->dev,
10937                         "Set vf %d mac spoof check %s failed, ret=%d\n",
10938                         vf, enable ? "on" : "off", ret);
10939                 return ret;
10940         }
10941
10942         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10943         if (ret)
10944                 dev_err(&hdev->pdev->dev,
10945                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
10946                         vf, enable ? "on" : "off", ret);
10947
10948         return ret;
10949 }
10950
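/* Enable or disable MAC and VLAN anti-spoof checking for a VF, warning when
 * a full MAC or VLAN table may cause the VF's packets to fail to send.
 */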
10951 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10952                                  bool enable)
10953 {
10954         struct hclge_vport *vport = hclge_get_vport(handle);
10955         struct hclge_dev *hdev = vport->back;
10956         u32 new_spoofchk = enable ? 1 : 0;
10957         int ret;
10958
10959         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10960                 return -EOPNOTSUPP;
10961
10962         vport = hclge_get_vf_vport(hdev, vf);
10963         if (!vport)
10964                 return -EINVAL;
10965
10966         if (vport->vf_info.spoofchk == new_spoofchk)
10967                 return 0;
10968
10969         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10970                 dev_warn(&hdev->pdev->dev,
10971                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10972                          vf);
10973         else if (enable && hclge_is_umv_space_full(vport, true))
10974                 dev_warn(&hdev->pdev->dev,
10975                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10976                          vf);
10977
10978         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10979         if (ret)
10980                 return ret;
10981
10982         vport->vf_info.spoofchk = new_spoofchk;
10983         return 0;
10984 }
10985
10986 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10987 {
10988         struct hclge_vport *vport = hdev->vport;
10989         int ret;
10990         int i;
10991
10992         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10993                 return 0;
10994
10995         /* resume the vf spoof check state after reset */
10996         for (i = 0; i < hdev->num_alloc_vport; i++) {
10997                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10998                                                vport->vf_info.spoofchk);
10999                 if (ret)
11000                         return ret;
11001
11002                 vport++;
11003         }
11004
11005         return 0;
11006 }
11007
11008 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11009 {
11010         struct hclge_vport *vport = hclge_get_vport(handle);
11011         struct hclge_dev *hdev = vport->back;
11012         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
11013         u32 new_trusted = enable ? 1 : 0;
11014         bool en_bc_pmc;
11015         int ret;
11016
11017         vport = hclge_get_vf_vport(hdev, vf);
11018         if (!vport)
11019                 return -EINVAL;
11020
11021         if (vport->vf_info.trusted == new_trusted)
11022                 return 0;
11023
11024         /* Disable promisc mode for VF if it is not trusted any more. */
11025         if (!enable && vport->vf_info.promisc_enable) {
11026                 en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
11027                 ret = hclge_set_vport_promisc_mode(vport, false, false,
11028                                                    en_bc_pmc);
11029                 if (ret)
11030                         return ret;
11031                 vport->vf_info.promisc_enable = 0;
11032                 hclge_inform_vf_promisc_info(vport);
11033         }
11034
11035         vport->vf_info.trusted = new_trusted;
11036
11037         return 0;
11038 }
11039
11040 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11041 {
11042         int ret;
11043         int vf;
11044
11045         /* reset vf rate to default value */
11046         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11047                 struct hclge_vport *vport = &hdev->vport[vf];
11048
11049                 vport->vf_info.max_tx_rate = 0;
11050                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11051                 if (ret)
11052                         dev_err(&hdev->pdev->dev,
11053                                 "vf%d failed to reset to default, ret=%d\n",
11054                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11055         }
11056 }
11057
11058 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11059                                      int min_tx_rate, int max_tx_rate)
11060 {
11061         if (min_tx_rate != 0 ||
11062             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11063                 dev_err(&hdev->pdev->dev,
11064                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11065                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11066                 return -EINVAL;
11067         }
11068
11069         return 0;
11070 }
11071
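/* Configure the TX rate limit of a VF by programming its qset shaper; a
 * max_tx_rate of zero means no limit.
 */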
11072 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11073                              int min_tx_rate, int max_tx_rate, bool force)
11074 {
11075         struct hclge_vport *vport = hclge_get_vport(handle);
11076         struct hclge_dev *hdev = vport->back;
11077         int ret;
11078
11079         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11080         if (ret)
11081                 return ret;
11082
11083         vport = hclge_get_vf_vport(hdev, vf);
11084         if (!vport)
11085                 return -EINVAL;
11086
11087         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11088                 return 0;
11089
11090         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11091         if (ret)
11092                 return ret;
11093
11094         vport->vf_info.max_tx_rate = max_tx_rate;
11095
11096         return 0;
11097 }
11098
11099 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11100 {
11101         struct hnae3_handle *handle = &hdev->vport->nic;
11102         struct hclge_vport *vport;
11103         int ret;
11104         int vf;
11105
11106         /* resume the vf max_tx_rate after reset */
11107         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11108                 vport = hclge_get_vf_vport(hdev, vf);
11109                 if (!vport)
11110                         return -EINVAL;
11111
11112                 /* zero means max rate; after reset the firmware has already set
11113                  * it to max rate, so just continue.
11114                  */
11115                 if (!vport->vf_info.max_tx_rate)
11116                         continue;
11117
11118                 ret = hclge_set_vf_rate(handle, vf, 0,
11119                                         vport->vf_info.max_tx_rate, true);
11120                 if (ret) {
11121                         dev_err(&hdev->pdev->dev,
11122                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11123                                 vf, vport->vf_info.max_tx_rate, ret);
11124                         return ret;
11125                 }
11126         }
11127
11128         return 0;
11129 }
11130
11131 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11132 {
11133         struct hclge_vport *vport = hdev->vport;
11134         int i;
11135
11136         for (i = 0; i < hdev->num_alloc_vport; i++) {
11137                 hclge_vport_stop(vport);
11138                 vport++;
11139         }
11140 }
11141
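/* Re-initialize the PF after a reset. Unlike hclge_init_ae_dev(), no
 * resources are reallocated; the hardware tables and configuration are
 * reprogrammed on top of the existing state.
 */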
11142 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11143 {
11144         struct hclge_dev *hdev = ae_dev->priv;
11145         struct pci_dev *pdev = ae_dev->pdev;
11146         int ret;
11147
11148         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11149
11150         hclge_stats_clear(hdev);
11151         /* NOTE: a PF reset does not need to clear or restore the PF and VF table
11152          * entries, so the tables in memory should not be cleaned here.
11153          */
11154         if (hdev->reset_type == HNAE3_IMP_RESET ||
11155             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11156                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11157                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11158                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11159                 hclge_reset_umv_space(hdev);
11160         }
11161
11162         ret = hclge_cmd_init(hdev);
11163         if (ret) {
11164                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11165                 return ret;
11166         }
11167
11168         ret = hclge_map_tqp(hdev);
11169         if (ret) {
11170                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11171                 return ret;
11172         }
11173
11174         ret = hclge_mac_init(hdev);
11175         if (ret) {
11176                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11177                 return ret;
11178         }
11179
11180         ret = hclge_tp_port_init(hdev);
11181         if (ret) {
11182                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11183                         ret);
11184                 return ret;
11185         }
11186
11187         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11188         if (ret) {
11189                 dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
11190                 return ret;
11191         }
11192
11193         ret = hclge_config_gro(hdev, true);
11194         if (ret)
11195                 return ret;
11196
11197         ret = hclge_init_vlan_config(hdev);
11198         if (ret) {
11199                 dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
11200                 return ret;
11201         }
11202
11203         ret = hclge_tm_init_hw(hdev, true);
11204         if (ret) {
11205                 dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
11206                 return ret;
11207         }
11208
11209         ret = hclge_rss_init_hw(hdev);
11210         if (ret) {
11211                 dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
11212                 return ret;
11213         }
11214
11215         ret = init_mgr_tbl(hdev);
11216         if (ret) {
11217                 dev_err(&pdev->dev,
11218                         "failed to reinit manager table, ret = %d\n", ret);
11219                 return ret;
11220         }
11221
11222         ret = hclge_init_fd_config(hdev);
11223         if (ret) {
11224                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11225                 return ret;
11226         }
11227
11228         /* Log and clear the hw errors that have already occurred */
11229         hclge_handle_all_hns_hw_errors(ae_dev);
11230
11231         /* Re-enable the hw error interrupts because
11232          * the interrupts get disabled on global reset.
11233          */
11234         ret = hclge_config_nic_hw_error(hdev, true);
11235         if (ret) {
11236                 dev_err(&pdev->dev,
11237                         "fail(%d) to re-enable NIC hw error interrupts\n",
11238                         ret);
11239                 return ret;
11240         }
11241
11242         if (hdev->roce_client) {
11243                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11244                 if (ret) {
11245                         dev_err(&pdev->dev,
11246                                 "fail(%d) to re-enable roce ras interrupts\n",
11247                                 ret);
11248                         return ret;
11249                 }
11250         }
11251
11252         hclge_reset_vport_state(hdev);
11253         ret = hclge_reset_vport_spoofchk(hdev);
11254         if (ret)
11255                 return ret;
11256
11257         ret = hclge_resume_vf_rate(hdev);
11258         if (ret)
11259                 return ret;
11260
11261         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11262                  HCLGE_DRIVER_NAME);
11263
11264         return 0;
11265 }
11266
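/* Tear down the PF instance: clear VF rate limits and VF VLAN entries,
 * unregister the MDIO bus if a PHY is attached, disable the misc vector and
 * all hardware error interrupts, then release command queue, IRQ and PCI
 * resources.
 */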
11267 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11268 {
11269         struct hclge_dev *hdev = ae_dev->priv;
11270         struct hclge_mac *mac = &hdev->hw.mac;
11271
11272         hclge_reset_vf_rate(hdev);
11273         hclge_clear_vf_vlan(hdev);
11274         hclge_misc_affinity_teardown(hdev);
11275         hclge_state_uninit(hdev);
11276         hclge_uninit_mac_table(hdev);
11277
11278         if (mac->phydev)
11279                 mdiobus_unregister(mac->mdio_bus);
11280
11281         /* Disable MISC vector(vector0) */
11282         hclge_enable_vector(&hdev->misc_vector, false);
11283         synchronize_irq(hdev->misc_vector.vector_irq);
11284
11285         /* Disable all hw interrupts */
11286         hclge_config_mac_tnl_int(hdev, false);
11287         hclge_config_nic_hw_error(hdev, false);
11288         hclge_config_rocee_ras_interrupt(hdev, false);
11289
11290         hclge_cmd_uninit(hdev);
11291         hclge_misc_irq_uninit(hdev);
11292         hclge_pci_uninit(hdev);
11293         mutex_destroy(&hdev->vport_lock);
11294         hclge_uninit_vport_vlan_table(hdev);
11295         ae_dev->priv = NULL;
11296 }
11297
11298 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11299 {
11300         struct hclge_vport *vport = hclge_get_vport(handle);
11301         struct hclge_dev *hdev = vport->back;
11302
11303         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11304 }
11305
11306 static void hclge_get_channels(struct hnae3_handle *handle,
11307                                struct ethtool_channels *ch)
11308 {
11309         ch->max_combined = hclge_get_max_channels(handle);
11310         ch->other_count = 1;
11311         ch->max_other = 1;
11312         ch->combined_count = handle->kinfo.rss_size;
11313 }
11314
11315 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11316                                         u16 *alloc_tqps, u16 *max_rss_size)
11317 {
11318         struct hclge_vport *vport = hclge_get_vport(handle);
11319         struct hclge_dev *hdev = vport->back;
11320
11321         *alloc_tqps = vport->alloc_tqps;
11322         *max_rss_size = hdev->pf_rss_size_max;
11323 }
11324
11325 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11326                               bool rxfh_configured)
11327 {
11328         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11329         struct hclge_vport *vport = hclge_get_vport(handle);
11330         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11331         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11332         struct hclge_dev *hdev = vport->back;
11333         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11334         u16 cur_rss_size = kinfo->rss_size;
11335         u16 cur_tqps = kinfo->num_tqps;
11336         u16 tc_valid[HCLGE_MAX_TC_NUM];
11337         u16 roundup_size;
11338         u32 *rss_indir;
11339         unsigned int i;
11340         int ret;
11341
11342         kinfo->req_rss_size = new_tqps_num;
11343
11344         ret = hclge_tm_vport_map_update(hdev);
11345         if (ret) {
11346                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
11347                 return ret;
11348         }
11349
11350         roundup_size = roundup_pow_of_two(kinfo->rss_size);
11351         roundup_size = ilog2(roundup_size);
11352         /* Set the RSS TC mode according to the new RSS size */
11353         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11354                 tc_valid[i] = 0;
11355
11356                 if (!(hdev->hw_tc_map & BIT(i)))
11357                         continue;
11358
11359                 tc_valid[i] = 1;
11360                 tc_size[i] = roundup_size;
11361                 tc_offset[i] = kinfo->rss_size * i;
11362         }
11363         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11364         if (ret)
11365                 return ret;
11366
11367         /* RSS indirection table has been configured by the user */
11368         if (rxfh_configured)
11369                 goto out;
11370
11371         /* Reinitialize the RSS indirection table according to the new RSS size */
11372         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11373                             GFP_KERNEL);
11374         if (!rss_indir)
11375                 return -ENOMEM;
11376
11377         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11378                 rss_indir[i] = i % kinfo->rss_size;
11379
11380         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11381         if (ret)
11382                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11383                         ret);
11384
11385         kfree(rss_indir);
11386
11387 out:
11388         if (!ret)
11389                 dev_info(&hdev->pdev->dev,
11390                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
11391                          cur_rss_size, kinfo->rss_size,
11392                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11393
11394         return ret;
11395 }
11396
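/* Query the firmware for the number of 32-bit and 64-bit registers that can
 * be dumped via the register query commands.
 */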
11397 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11398                               u32 *regs_num_64_bit)
11399 {
11400         struct hclge_desc desc;
11401         u32 total_num;
11402         int ret;
11403
11404         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11405         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11406         if (ret) {
11407                 dev_err(&hdev->pdev->dev,
11408                         "Query register number cmd failed, ret = %d.\n", ret);
11409                 return ret;
11410         }
11411
11412         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
11413         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
11414
11415         total_num = *regs_num_32_bit + *regs_num_64_bit;
11416         if (!total_num)
11417                 return -EINVAL;
11418
11419         return 0;
11420 }
11421
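/* Read regs_num 32-bit registers into "data" using a chain of command
 * descriptors: the first descriptor supplies register values only from its
 * data area, while each following descriptor is consumed in full.
 */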
11422 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11423                                  void *data)
11424 {
11425 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11426 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11427
11428         struct hclge_desc *desc;
11429         u32 *reg_val = data;
11430         __le32 *desc_data;
11431         int nodata_num;
11432         int cmd_num;
11433         int i, k, n;
11434         int ret;
11435
11436         if (regs_num == 0)
11437                 return 0;
11438
11439         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11440         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11441                                HCLGE_32_BIT_REG_RTN_DATANUM);
11442         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11443         if (!desc)
11444                 return -ENOMEM;
11445
11446         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11447         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11448         if (ret) {
11449                 dev_err(&hdev->pdev->dev,
11450                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
11451                 kfree(desc);
11452                 return ret;
11453         }
11454
11455         for (i = 0; i < cmd_num; i++) {
11456                 if (i == 0) {
11457                         desc_data = (__le32 *)(&desc[i].data[0]);
11458                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11459                 } else {
11460                         desc_data = (__le32 *)(&desc[i]);
11461                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
11462                 }
11463                 for (k = 0; k < n; k++) {
11464                         *reg_val++ = le32_to_cpu(*desc_data++);
11465
11466                         regs_num--;
11467                         if (!regs_num)
11468                                 break;
11469                 }
11470         }
11471
11472         kfree(desc);
11473         return 0;
11474 }
11475
11476 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11477                                  void *data)
11478 {
11479 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11480 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11481
11482         struct hclge_desc *desc;
11483         u64 *reg_val = data;
11484         __le64 *desc_data;
11485         int nodata_len;
11486         int cmd_num;
11487         int i, k, n;
11488         int ret;
11489
11490         if (regs_num == 0)
11491                 return 0;
11492
11493         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11494         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11495                                HCLGE_64_BIT_REG_RTN_DATANUM);
11496         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11497         if (!desc)
11498                 return -ENOMEM;
11499
11500         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11501         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11502         if (ret) {
11503                 dev_err(&hdev->pdev->dev,
11504                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
11505                 kfree(desc);
11506                 return ret;
11507         }
11508
11509         for (i = 0; i < cmd_num; i++) {
11510                 if (i == 0) {
11511                         desc_data = (__le64 *)(&desc[i].data[0]);
11512                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11513                 } else {
11514                         desc_data = (__le64 *)(&desc[i]);
11515                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
11516                 }
11517                 for (k = 0; k < n; k++) {
11518                         *reg_val++ = le64_to_cpu(*desc_data++);
11519
11520                         regs_num--;
11521                         if (!regs_num)
11522                                 break;
11523                 }
11524         }
11525
11526         kfree(desc);
11527         return 0;
11528 }
11529
11530 #define MAX_SEPARATE_NUM        4
11531 #define SEPARATOR_VALUE         0xFDFCFBFA
11532 #define REG_NUM_PER_LINE        4
11533 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
11534 #define REG_SEPARATOR_LINE      1
11535 #define REG_NUM_REMAIN_MASK     3
11536 #define BD_LIST_MAX_NUM         30
11537
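/* Send a chained HCLGE_OPC_DFX_BD_NUM query; every BD except the last one
 * carries the NEXT flag, and the returned descriptors hold the BD count of
 * each DFX register group.
 */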
11538 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11539 {
11540         int i;
11541
11542         /* initialize command BDs except the last one */
11543         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11544                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11545                                            true);
11546                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11547         }
11548
11549         /* initialize the last command BD */
11550         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11551
11552         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11553 }
11554
11555 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11556                                     int *bd_num_list,
11557                                     u32 type_num)
11558 {
11559         u32 entries_per_desc, desc_index, index, offset, i;
11560         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11561         int ret;
11562
11563         ret = hclge_query_bd_num_cmd_send(hdev, desc);
11564         if (ret) {
11565                 dev_err(&hdev->pdev->dev,
11566                         "Get dfx bd num fail, status is %d.\n", ret);
11567                 return ret;
11568         }
11569
11570         entries_per_desc = ARRAY_SIZE(desc[0].data);
11571         for (i = 0; i < type_num; i++) {
11572                 offset = hclge_dfx_bd_offset_list[i];
11573                 index = offset % entries_per_desc;
11574                 desc_index = offset / entries_per_desc;
11575                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11576         }
11577
11578         return ret;
11579 }
11580
11581 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11582                                   struct hclge_desc *desc_src, int bd_num,
11583                                   enum hclge_opcode_type cmd)
11584 {
11585         struct hclge_desc *desc = desc_src;
11586         int i, ret;
11587
11588         hclge_cmd_setup_basic_desc(desc, cmd, true);
11589         for (i = 0; i < bd_num - 1; i++) {
11590                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11591                 desc++;
11592                 hclge_cmd_setup_basic_desc(desc, cmd, true);
11593         }
11594
11595         desc = desc_src;
11596         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11597         if (ret)
11598                 dev_err(&hdev->pdev->dev,
11599                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11600                         cmd, ret);
11601
11602         return ret;
11603 }
11604
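/* Copy the register values out of the descriptors into "data", append
 * SEPARATOR_VALUE padding words to keep the dump aligned to
 * REG_NUM_PER_LINE, and return the number of u32 words written.
 */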
11605 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11606                                     void *data)
11607 {
11608         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11609         struct hclge_desc *desc = desc_src;
11610         u32 *reg = data;
11611
11612         entries_per_desc = ARRAY_SIZE(desc->data);
11613         reg_num = entries_per_desc * bd_num;
11614         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
11615         for (i = 0; i < reg_num; i++) {
11616                 index = i % entries_per_desc;
11617                 desc_index = i / entries_per_desc;
11618                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
11619         }
11620         for (i = 0; i < separator_num; i++)
11621                 *reg++ = SEPARATOR_VALUE;
11622
11623         return reg_num + separator_num;
11624 }
11625
11626 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11627 {
11628         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11629         int data_len_per_desc, bd_num, i;
11630         int bd_num_list[BD_LIST_MAX_NUM];
11631         u32 data_len;
11632         int ret;
11633
11634         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11635         if (ret) {
11636                 dev_err(&hdev->pdev->dev,
11637                         "Get dfx reg bd num fail, status is %d.\n", ret);
11638                 return ret;
11639         }
11640
11641         data_len_per_desc = sizeof_field(struct hclge_desc, data);
11642         *len = 0;
11643         for (i = 0; i < dfx_reg_type_num; i++) {
11644                 bd_num = bd_num_list[i];
11645                 data_len = data_len_per_desc * bd_num;
11646                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11647         }
11648
11649         return ret;
11650 }
11651
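/* Dump all DFX register groups: query the BD count of each group, allocate a
 * descriptor buffer large enough for the biggest group, then fetch and copy
 * the registers group by group into "data".
 */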
11652 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11653 {
11654         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11655         int bd_num, bd_num_max, buf_len, i;
11656         int bd_num_list[BD_LIST_MAX_NUM];
11657         struct hclge_desc *desc_src;
11658         u32 *reg = data;
11659         int ret;
11660
11661         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11662         if (ret) {
11663                 dev_err(&hdev->pdev->dev,
11664                         "Get dfx reg bd num fail, status is %d.\n", ret);
11665                 return ret;
11666         }
11667
11668         bd_num_max = bd_num_list[0];
11669         for (i = 1; i < dfx_reg_type_num; i++)
11670                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11671
11672         buf_len = sizeof(*desc_src) * bd_num_max;
11673         desc_src = kzalloc(buf_len, GFP_KERNEL);
11674         if (!desc_src)
11675                 return -ENOMEM;
11676
11677         for (i = 0; i < dfx_reg_type_num; i++) {
11678                 bd_num = bd_num_list[i];
11679                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11680                                              hclge_dfx_reg_opcode_list[i]);
11681                 if (ret) {
11682                         dev_err(&hdev->pdev->dev,
11683                                 "Get dfx reg fail, status is %d.\n", ret);
11684                         break;
11685                 }
11686
11687                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11688         }
11689
11690         kfree(desc_src);
11691         return ret;
11692 }
11693
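/* Dump the directly readable PF registers: cmdq and common registers once,
 * ring registers per TQP and interrupt registers per used MSI-X vector, each
 * group followed by SEPARATOR_VALUE padding; returns the number of u32 words
 * written.
 */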
11694 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11695                               struct hnae3_knic_private_info *kinfo)
11696 {
11697 #define HCLGE_RING_REG_OFFSET           0x200
11698 #define HCLGE_RING_INT_REG_OFFSET       0x4
11699
11700         int i, j, reg_num, separator_num;
11701         int data_num_sum;
11702         u32 *reg = data;
11703
11704         /* fetch per-PF register values from the PF PCIe register space */
11705         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11706         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11707         for (i = 0; i < reg_num; i++)
11708                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11709         for (i = 0; i < separator_num; i++)
11710                 *reg++ = SEPARATOR_VALUE;
11711         data_num_sum = reg_num + separator_num;
11712
11713         reg_num = ARRAY_SIZE(common_reg_addr_list);
11714         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11715         for (i = 0; i < reg_num; i++)
11716                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11717         for (i = 0; i < separator_num; i++)
11718                 *reg++ = SEPARATOR_VALUE;
11719         data_num_sum += reg_num + separator_num;
11720
11721         reg_num = ARRAY_SIZE(ring_reg_addr_list);
11722         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11723         for (j = 0; j < kinfo->num_tqps; j++) {
11724                 for (i = 0; i < reg_num; i++)
11725                         *reg++ = hclge_read_dev(&hdev->hw,
11726                                                 ring_reg_addr_list[i] +
11727                                                 HCLGE_RING_REG_OFFSET * j);
11728                 for (i = 0; i < separator_num; i++)
11729                         *reg++ = SEPARATOR_VALUE;
11730         }
11731         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11732
11733         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11734         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11735         for (j = 0; j < hdev->num_msi_used - 1; j++) {
11736                 for (i = 0; i < reg_num; i++)
11737                         *reg++ = hclge_read_dev(&hdev->hw,
11738                                                 tqp_intr_reg_addr_list[i] +
11739                                                 HCLGE_RING_INT_REG_OFFSET * j);
11740                 for (i = 0; i < separator_num; i++)
11741                         *reg++ = SEPARATOR_VALUE;
11742         }
11743         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11744
11745         return data_num_sum;
11746 }
11747
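/* Compute the total length in bytes of the register dump produced by
 * hclge_get_regs(): whole REG_LEN_PER_LINE lines per register group plus a
 * separator line each, with the DFX register length added on top.
 */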
11748 static int hclge_get_regs_len(struct hnae3_handle *handle)
11749 {
11750         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11751         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11752         struct hclge_vport *vport = hclge_get_vport(handle);
11753         struct hclge_dev *hdev = vport->back;
11754         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11755         int regs_lines_32_bit, regs_lines_64_bit;
11756         int ret;
11757
11758         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11759         if (ret) {
11760                 dev_err(&hdev->pdev->dev,
11761                         "Get register number failed, ret = %d.\n", ret);
11762                 return ret;
11763         }
11764
11765         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11766         if (ret) {
11767                 dev_err(&hdev->pdev->dev,
11768                         "Get dfx reg len failed, ret = %d.\n", ret);
11769                 return ret;
11770         }
11771
11772         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11773                 REG_SEPARATOR_LINE;
11774         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11775                 REG_SEPARATOR_LINE;
11776         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11777                 REG_SEPARATOR_LINE;
11778         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11779                 REG_SEPARATOR_LINE;
11780         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11781                 REG_SEPARATOR_LINE;
11782         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11783                 REG_SEPARATOR_LINE;
11784
11785         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11786                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11787                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11788 }
11789
11790 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11791                            void *data)
11792 {
11793         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11794         struct hclge_vport *vport = hclge_get_vport(handle);
11795         struct hclge_dev *hdev = vport->back;
11796         u32 regs_num_32_bit, regs_num_64_bit;
11797         int i, reg_num, separator_num, ret;
11798         u32 *reg = data;
11799
11800         *version = hdev->fw_version;
11801
11802         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11803         if (ret) {
11804                 dev_err(&hdev->pdev->dev,
11805                         "Get register number failed, ret = %d.\n", ret);
11806                 return;
11807         }
11808
11809         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11810
11811         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11812         if (ret) {
11813                 dev_err(&hdev->pdev->dev,
11814                         "Get 32 bit register failed, ret = %d.\n", ret);
11815                 return;
11816         }
11817         reg_num = regs_num_32_bit;
11818         reg += reg_num;
11819         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11820         for (i = 0; i < separator_num; i++)
11821                 *reg++ = SEPARATOR_VALUE;
11822
11823         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11824         if (ret) {
11825                 dev_err(&hdev->pdev->dev,
11826                         "Get 64 bit register failed, ret = %d.\n", ret);
11827                 return;
11828         }
11829         reg_num = regs_num_64_bit * 2;
11830         reg += reg_num;
11831         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11832         for (i = 0; i < separator_num; i++)
11833                 *reg++ = SEPARATOR_VALUE;
11834
11835         ret = hclge_get_dfx_reg(hdev, reg);
11836         if (ret)
11837                 dev_err(&hdev->pdev->dev,
11838                         "Get dfx register failed, ret = %d.\n", ret);
11839 }
11840
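/* Set the locate LED state through the HCLGE_OPC_LED_STATUS_CFG command. */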
11841 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11842 {
11843         struct hclge_set_led_state_cmd *req;
11844         struct hclge_desc desc;
11845         int ret;
11846
11847         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11848
11849         req = (struct hclge_set_led_state_cmd *)desc.data;
11850         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11851                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11852
11853         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11854         if (ret)
11855                 dev_err(&hdev->pdev->dev,
11856                         "Send set led state cmd error, ret = %d\n", ret);
11857
11858         return ret;
11859 }
11860
11861 enum hclge_led_status {
11862         HCLGE_LED_OFF,
11863         HCLGE_LED_ON,
11864         HCLGE_LED_NO_CHANGE = 0xFF,
11865 };
11866
11867 static int hclge_set_led_id(struct hnae3_handle *handle,
11868                             enum ethtool_phys_id_state status)
11869 {
11870         struct hclge_vport *vport = hclge_get_vport(handle);
11871         struct hclge_dev *hdev = vport->back;
11872
11873         switch (status) {
11874         case ETHTOOL_ID_ACTIVE:
11875                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
11876         case ETHTOOL_ID_INACTIVE:
11877                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11878         default:
11879                 return -EINVAL;
11880         }
11881 }
11882
11883 static void hclge_get_link_mode(struct hnae3_handle *handle,
11884                                 unsigned long *supported,
11885                                 unsigned long *advertising)
11886 {
11887         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11888         struct hclge_vport *vport = hclge_get_vport(handle);
11889         struct hclge_dev *hdev = vport->back;
11890         unsigned int idx = 0;
11891
11892         for (; idx < size; idx++) {
11893                 supported[idx] = hdev->hw.mac.supported[idx];
11894                 advertising[idx] = hdev->hw.mac.advertising[idx];
11895         }
11896 }
11897
11898 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11899 {
11900         struct hclge_vport *vport = hclge_get_vport(handle);
11901         struct hclge_dev *hdev = vport->back;
11902
11903         return hclge_config_gro(hdev, enable);
11904 }
11905
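/* Reapply the PF promiscuous mode when the overflow promiscuous flags or the
 * netdev flags have changed, and update VLAN filtering to match.
 */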
11906 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11907 {
11908         struct hclge_vport *vport = &hdev->vport[0];
11909         struct hnae3_handle *handle = &vport->nic;
11910         u8 tmp_flags;
11911         int ret;
11912
11913         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11914                 set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11915                 vport->last_promisc_flags = vport->overflow_promisc_flags;
11916         }
11917
11918         if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11919                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11920                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11921                                              tmp_flags & HNAE3_MPE);
11922                 if (!ret) {
11923                         clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11924                         hclge_enable_vlan_filter(handle,
11925                                                  tmp_flags & HNAE3_VLAN_FLTR);
11926                 }
11927         }
11928 }
11929
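/* Check through the HCLGE_OPC_GET_SFP_EXIST command whether an SFP module is
 * currently plugged in.
 */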
11930 static bool hclge_module_existed(struct hclge_dev *hdev)
11931 {
11932         struct hclge_desc desc;
11933         u32 existed;
11934         int ret;
11935
11936         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11937         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11938         if (ret) {
11939                 dev_err(&hdev->pdev->dev,
11940                         "failed to get SFP exist state, ret = %d\n", ret);
11941                 return false;
11942         }
11943
11944         existed = le32_to_cpu(desc.data[0]);
11945
11946         return existed != 0;
11947 }
11948
11949 /* need 6 BDs (140 bytes in total) in one read.
11950  * Return the number of bytes actually read; 0 means the read failed.
11951  */
11952 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11953                                      u32 len, u8 *data)
11954 {
11955         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11956         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11957         u16 read_len;
11958         u16 copy_len;
11959         int ret;
11960         int i;
11961
11962         /* setup all 6 bds to read module eeprom info. */
11963         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11964                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11965                                            true);
11966
11967                 /* bd0~bd4 need next flag */
11968                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11969                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11970         }
11971
11972         /* setup bd0, this bd contains offset and read length. */
11973         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11974         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11975         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11976         sfp_info_bd0->read_len = cpu_to_le16(read_len);
11977
11978         ret = hclge_cmd_send(&hdev->hw, desc, i);
11979         if (ret) {
11980                 dev_err(&hdev->pdev->dev,
11981                         "failed to get SFP eeprom info, ret = %d\n", ret);
11982                 return 0;
11983         }
11984
11985         /* copy sfp info from bd0 to out buffer. */
11986         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11987         memcpy(data, sfp_info_bd0->data, copy_len);
11988         read_len = copy_len;
11989
11990         /* copy sfp info from bd1~bd5 to out buffer if needed. */
11991         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11992                 if (read_len >= len)
11993                         return read_len;
11994
11995                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11996                 memcpy(data + read_len, desc[i].data, copy_len);
11997                 read_len += copy_len;
11998         }
11999
12000         return read_len;
12001 }
12002
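/* Read "len" bytes of module eeprom data starting at "offset", in chunks of
 * up to HCLGE_SFP_INFO_MAX_LEN bytes; only supported on fiber ports with a
 * module present.
 */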
12003 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12004                                    u32 len, u8 *data)
12005 {
12006         struct hclge_vport *vport = hclge_get_vport(handle);
12007         struct hclge_dev *hdev = vport->back;
12008         u32 read_len = 0;
12009         u16 data_len;
12010
12011         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12012                 return -EOPNOTSUPP;
12013
12014         if (!hclge_module_existed(hdev))
12015                 return -ENXIO;
12016
12017         while (read_len < len) {
12018                 data_len = hclge_get_sfp_eeprom_info(hdev,
12019                                                      offset + read_len,
12020                                                      len - read_len,
12021                                                      data + read_len);
12022                 if (!data_len)
12023                         return -EIO;
12024
12025                 read_len += data_len;
12026         }
12027
12028         return 0;
12029 }
12030
12031 static const struct hnae3_ae_ops hclge_ops = {
12032         .init_ae_dev = hclge_init_ae_dev,
12033         .uninit_ae_dev = hclge_uninit_ae_dev,
12034         .flr_prepare = hclge_flr_prepare,
12035         .flr_done = hclge_flr_done,
12036         .init_client_instance = hclge_init_client_instance,
12037         .uninit_client_instance = hclge_uninit_client_instance,
12038         .map_ring_to_vector = hclge_map_ring_to_vector,
12039         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12040         .get_vector = hclge_get_vector,
12041         .put_vector = hclge_put_vector,
12042         .set_promisc_mode = hclge_set_promisc_mode,
12043         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12044         .set_loopback = hclge_set_loopback,
12045         .start = hclge_ae_start,
12046         .stop = hclge_ae_stop,
12047         .client_start = hclge_client_start,
12048         .client_stop = hclge_client_stop,
12049         .get_status = hclge_get_status,
12050         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12051         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12052         .get_media_type = hclge_get_media_type,
12053         .check_port_speed = hclge_check_port_speed,
12054         .get_fec = hclge_get_fec,
12055         .set_fec = hclge_set_fec,
12056         .get_rss_key_size = hclge_get_rss_key_size,
12057         .get_rss = hclge_get_rss,
12058         .set_rss = hclge_set_rss,
12059         .set_rss_tuple = hclge_set_rss_tuple,
12060         .get_rss_tuple = hclge_get_rss_tuple,
12061         .get_tc_size = hclge_get_tc_size,
12062         .get_mac_addr = hclge_get_mac_addr,
12063         .set_mac_addr = hclge_set_mac_addr,
12064         .do_ioctl = hclge_do_ioctl,
12065         .add_uc_addr = hclge_add_uc_addr,
12066         .rm_uc_addr = hclge_rm_uc_addr,
12067         .add_mc_addr = hclge_add_mc_addr,
12068         .rm_mc_addr = hclge_rm_mc_addr,
12069         .set_autoneg = hclge_set_autoneg,
12070         .get_autoneg = hclge_get_autoneg,
12071         .restart_autoneg = hclge_restart_autoneg,
12072         .halt_autoneg = hclge_halt_autoneg,
12073         .get_pauseparam = hclge_get_pauseparam,
12074         .set_pauseparam = hclge_set_pauseparam,
12075         .set_mtu = hclge_set_mtu,
12076         .reset_queue = hclge_reset_tqp,
12077         .get_stats = hclge_get_stats,
12078         .get_mac_stats = hclge_get_mac_stat,
12079         .update_stats = hclge_update_stats,
12080         .get_strings = hclge_get_strings,
12081         .get_sset_count = hclge_get_sset_count,
12082         .get_fw_version = hclge_get_fw_version,
12083         .get_mdix_mode = hclge_get_mdix_mode,
12084         .enable_vlan_filter = hclge_enable_vlan_filter,
12085         .set_vlan_filter = hclge_set_vlan_filter,
12086         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12087         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12088         .reset_event = hclge_reset_event,
12089         .get_reset_level = hclge_get_reset_level,
12090         .set_default_reset_request = hclge_set_def_reset_request,
12091         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12092         .set_channels = hclge_set_channels,
12093         .get_channels = hclge_get_channels,
12094         .get_regs_len = hclge_get_regs_len,
12095         .get_regs = hclge_get_regs,
12096         .set_led_id = hclge_set_led_id,
12097         .get_link_mode = hclge_get_link_mode,
12098         .add_fd_entry = hclge_add_fd_entry,
12099         .del_fd_entry = hclge_del_fd_entry,
12100         .del_all_fd_entries = hclge_del_all_fd_entries,
12101         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12102         .get_fd_rule_info = hclge_get_fd_rule_info,
12103         .get_fd_all_rules = hclge_get_all_rules,
12104         .enable_fd = hclge_enable_fd,
12105         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12106         .dbg_run_cmd = hclge_dbg_run_cmd,
12107         .dbg_read_cmd = hclge_dbg_read_cmd,
12108         .handle_hw_ras_error = hclge_handle_hw_ras_error,
12109         .get_hw_reset_stat = hclge_get_hw_reset_stat,
12110         .ae_dev_resetting = hclge_ae_dev_resetting,
12111         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12112         .set_gro_en = hclge_gro_en,
12113         .get_global_queue_id = hclge_covert_handle_qid_global,
12114         .set_timer_task = hclge_set_timer_task,
12115         .mac_connect_phy = hclge_mac_connect_phy,
12116         .mac_disconnect_phy = hclge_mac_disconnect_phy,
12117         .get_vf_config = hclge_get_vf_config,
12118         .set_vf_link_state = hclge_set_vf_link_state,
12119         .set_vf_spoofchk = hclge_set_vf_spoofchk,
12120         .set_vf_trust = hclge_set_vf_trust,
12121         .set_vf_rate = hclge_set_vf_rate,
12122         .set_vf_mac = hclge_set_vf_mac,
12123         .get_module_eeprom = hclge_get_module_eeprom,
12124         .get_cmdq_stat = hclge_get_cmdq_stat,
12125         .add_cls_flower = hclge_add_cls_flower,
12126         .del_cls_flower = hclge_del_cls_flower,
12127         .cls_flower_active = hclge_is_cls_flower_active,
12128         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12129         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12130 };
12131
12132 static struct hnae3_ae_algo ae_algo = {
12133         .ops = &hclge_ops,
12134         .pdev_id_table = ae_algo_pci_tbl,
12135 };
12136
12137 static int hclge_init(void)
12138 {
12139         pr_info("%s is initializing\n", HCLGE_NAME);
12140
12141         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12142         if (!hclge_wq) {
12143                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12144                 return -ENOMEM;
12145         }
12146
12147         hnae3_register_ae_algo(&ae_algo);
12148
12149         return 0;
12150 }
12151
12152 static void hclge_exit(void)
12153 {
12154         hnae3_unregister_ae_algo(&ae_algo);
12155         destroy_workqueue(hclge_wq);
12156 }
12157 module_init(hclge_init);
12158 module_exit(hclge_exit);
12159
12160 MODULE_LICENSE("GPL");
12161 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12162 MODULE_DESCRIPTION("HCLGE Driver");
12163 MODULE_VERSION(HCLGE_MOD_VERSION);