1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
7 #include <linux/types.h>
9 #include <linux/if_vlan.h>
11 #include "hclge_cmd.h"
/* Driver identification strings */
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
/* Sentinel vport id meaning "no/invalid vport" */
#define HCLGE_INVALID_VPORT 0xffff
/* PF configuration is fetched in 32-byte blocks; number of command
 * descriptors needed to read one block.
 */
#define HCLGE_PF_CFG_BLOCK_SIZE 32
#define HCLGE_PF_CFG_DESC_NUM \
	(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
/* Interrupt vector register layout (base addresses and strides) */
#define HCLGE_VECTOR_REG_BASE 0x20000
#define HCLGE_MISC_VECTOR_REG_BASE 0x20400
#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000
/* RSS: indirection table size, hash key size and hash algorithm ids */
#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
	(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
/* RSS input-tuple selection bits (src/dst IP, src/dst port, vlan tag) */
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)
#define HCLGE_S_PORT_BIT BIT(1)
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
/* Per-TC RSS queue counts (powers of two, 1..128) */
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
#define HCLGE_RSS_TC_SIZE_3 8
#define HCLGE_RSS_TC_SIZE_4 16
#define HCLGE_RSS_TC_SIZE_5 32
#define HCLGE_RSS_TC_SIZE_6 64
#define HCLGE_RSS_TC_SIZE_7 128
/* Multicast table (MTA) size in entries */
#define HCLGE_MTA_TBL_SIZE 4096
/* Max polling attempts while waiting for a TQP reset to complete */
#define HCLGE_TQP_RESET_TRY_TIMES 10
/* PHY page numbers used when configuring MDI-X on copper PHYs */
#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0
/* Page Selection Reg. */
#define HCLGE_PHY_PAGE_REG 22
/* Copper Specific Control Register */
#define HCLGE_PHY_CSC_REG 16
/* Copper Specific Status Register */
#define HCLGE_PHY_CSS_REG 17
/* MDI-X control/status bit positions within the PHY registers above */
#define HCLGE_PHY_MDIX_CTRL_S 5
#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
#define HCLGE_PHY_MDIX_STATUS_B 6
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8
82 enum HLCGE_PORT_TYPE {
/* Bit fields carrying PF id, VF id, port type and network port id
 * (presumably packed into one id word — confirm against the users).
 */
#define HCLGE_PF_ID_S 0
#define HCLGE_PF_ID_M GENMASK(2, 0)
#define HCLGE_VF_ID_S 3
#define HCLGE_VF_ID_M GENMASK(10, 3)
#define HCLGE_PORT_TYPE_B 11
#define HCLGE_NETWORK_PORT_ID_S 0
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
/* Vector0 register bits define */
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
/* Default frame size: MTU-sized payload plus Ethernet, VLAN and FCS
 * overhead; MIN/MAX are the hardware frame-length limits in bytes.
 */
#define HCLGE_MAC_DEFAULT_FRAME \
	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728
/* Supported link-speed capability bits */
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
125 enum HCLGE_DEV_STATE {
126 HCLGE_STATE_REINITING,
128 HCLGE_STATE_DISABLED,
129 HCLGE_STATE_REMOVING,
130 HCLGE_STATE_SERVICE_INITED,
131 HCLGE_STATE_SERVICE_SCHED,
132 HCLGE_STATE_RST_SERVICE_SCHED,
133 HCLGE_STATE_RST_HANDLING,
134 HCLGE_STATE_MBX_SERVICE_SCHED,
135 HCLGE_STATE_MBX_HANDLING,
136 HCLGE_STATE_STATISTICS_UPDATING,
137 HCLGE_STATE_CMD_DISABLE,
141 enum hclge_evt_cause {
142 HCLGE_VECTOR0_EVENT_RST,
143 HCLGE_VECTOR0_EVENT_MBX,
144 HCLGE_VECTOR0_EVENT_OTHER,
/* NOTE: "ENBALE" is a historical misspelling of "ENABLE"; the original
 * name is kept so existing users keep compiling. Prefer the correctly
 * spelled alias below in new code.
 */
#define HCLGE_MPF_ENBALE 1
#define HCLGE_MPF_ENABLE HCLGE_MPF_ENBALE
149 enum HCLGE_MAC_SPEED {
150 HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
151 HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
152 HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
153 HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */
154 HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
155 HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
156 HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
157 HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */
160 enum HCLGE_MAC_DUPLEX {
165 enum hclge_mta_dmac_sel_type {
166 HCLGE_MAC_ADDR_47_36,
167 HCLGE_MAC_ADDR_46_35,
168 HCLGE_MAC_ADDR_45_34,
169 HCLGE_MAC_ADDR_44_33,
176 u8 mac_addr[ETH_ALEN];
int link; /* store the link status of mac & phy (if phy exists) */
181 struct phy_device *phydev;
182 struct mii_bus *mdio_bus;
183 phy_interface_t phy_if;
184 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
185 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
189 void __iomem *io_base;
190 struct hclge_mac mac;
192 struct hclge_cmq cmq;
196 struct hlcge_tqp_stats {
/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
198 u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
200 u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
204 /* copy of device pointer from pci_dev,
205 * used when perform DMA mapping
208 struct hnae3_queue q;
209 struct hlcge_tqp_stats tqp_stats;
210 u16 index; /* Global index in a NIC controller */
/* Priority-group scheduling: number of PGs and the two arbitration
 * modes (SP = strict priority, DWRR = deficit weighted round robin).
 */
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
227 struct hclge_pg_info {
229 u8 pg_sch_mode; /* 0: sp; 1: dwrr */
232 u8 tc_dwrr[HNAE3_MAX_TC];
235 struct hclge_tc_info {
237 u8 tc_sch_mode; /* 0: sp; 1: dwrr */
250 u8 mac_addr[ETH_ALEN];
256 struct hclge_tm_info {
258 u8 num_pg; /* It must be 1 if vNET-Base schd */
259 u8 pg_dwrr[HCLGE_PG_NUM];
260 u8 prio_tc[HNAE3_MAX_USER_PRIO];
261 struct hclge_pg_info pg_info[HCLGE_PG_NUM];
262 struct hclge_tc_info tc_info[HNAE3_MAX_TC];
263 enum hclge_fc_mode fc_mode;
264 u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
267 struct hclge_comm_stats_str {
268 char desc[ETH_GSTRING_LEN];
269 unsigned long offset;
/* mac stats, opcode id: 0x0032 */
273 struct hclge_mac_stats {
274 u64 mac_tx_mac_pause_num;
275 u64 mac_rx_mac_pause_num;
276 u64 mac_tx_pfc_pri0_pkt_num;
277 u64 mac_tx_pfc_pri1_pkt_num;
278 u64 mac_tx_pfc_pri2_pkt_num;
279 u64 mac_tx_pfc_pri3_pkt_num;
280 u64 mac_tx_pfc_pri4_pkt_num;
281 u64 mac_tx_pfc_pri5_pkt_num;
282 u64 mac_tx_pfc_pri6_pkt_num;
283 u64 mac_tx_pfc_pri7_pkt_num;
284 u64 mac_rx_pfc_pri0_pkt_num;
285 u64 mac_rx_pfc_pri1_pkt_num;
286 u64 mac_rx_pfc_pri2_pkt_num;
287 u64 mac_rx_pfc_pri3_pkt_num;
288 u64 mac_rx_pfc_pri4_pkt_num;
289 u64 mac_rx_pfc_pri5_pkt_num;
290 u64 mac_rx_pfc_pri6_pkt_num;
291 u64 mac_rx_pfc_pri7_pkt_num;
292 u64 mac_tx_total_pkt_num;
293 u64 mac_tx_total_oct_num;
294 u64 mac_tx_good_pkt_num;
295 u64 mac_tx_bad_pkt_num;
296 u64 mac_tx_good_oct_num;
297 u64 mac_tx_bad_oct_num;
298 u64 mac_tx_uni_pkt_num;
299 u64 mac_tx_multi_pkt_num;
300 u64 mac_tx_broad_pkt_num;
301 u64 mac_tx_undersize_pkt_num;
302 u64 mac_tx_oversize_pkt_num;
303 u64 mac_tx_64_oct_pkt_num;
304 u64 mac_tx_65_127_oct_pkt_num;
305 u64 mac_tx_128_255_oct_pkt_num;
306 u64 mac_tx_256_511_oct_pkt_num;
307 u64 mac_tx_512_1023_oct_pkt_num;
308 u64 mac_tx_1024_1518_oct_pkt_num;
309 u64 mac_tx_1519_2047_oct_pkt_num;
310 u64 mac_tx_2048_4095_oct_pkt_num;
311 u64 mac_tx_4096_8191_oct_pkt_num;
313 u64 mac_tx_8192_9216_oct_pkt_num;
314 u64 mac_tx_9217_12287_oct_pkt_num;
315 u64 mac_tx_12288_16383_oct_pkt_num;
316 u64 mac_tx_1519_max_good_oct_pkt_num;
317 u64 mac_tx_1519_max_bad_oct_pkt_num;
319 u64 mac_rx_total_pkt_num;
320 u64 mac_rx_total_oct_num;
321 u64 mac_rx_good_pkt_num;
322 u64 mac_rx_bad_pkt_num;
323 u64 mac_rx_good_oct_num;
324 u64 mac_rx_bad_oct_num;
325 u64 mac_rx_uni_pkt_num;
326 u64 mac_rx_multi_pkt_num;
327 u64 mac_rx_broad_pkt_num;
328 u64 mac_rx_undersize_pkt_num;
329 u64 mac_rx_oversize_pkt_num;
330 u64 mac_rx_64_oct_pkt_num;
331 u64 mac_rx_65_127_oct_pkt_num;
332 u64 mac_rx_128_255_oct_pkt_num;
333 u64 mac_rx_256_511_oct_pkt_num;
334 u64 mac_rx_512_1023_oct_pkt_num;
335 u64 mac_rx_1024_1518_oct_pkt_num;
336 u64 mac_rx_1519_2047_oct_pkt_num;
337 u64 mac_rx_2048_4095_oct_pkt_num;
338 u64 mac_rx_4096_8191_oct_pkt_num;
340 u64 mac_rx_8192_9216_oct_pkt_num;
341 u64 mac_rx_9217_12287_oct_pkt_num;
342 u64 mac_rx_12288_16383_oct_pkt_num;
343 u64 mac_rx_1519_max_good_oct_pkt_num;
344 u64 mac_rx_1519_max_bad_oct_pkt_num;
346 u64 mac_tx_fragment_pkt_num;
347 u64 mac_tx_undermin_pkt_num;
348 u64 mac_tx_jabber_pkt_num;
349 u64 mac_tx_err_all_pkt_num;
350 u64 mac_tx_from_app_good_pkt_num;
351 u64 mac_tx_from_app_bad_pkt_num;
352 u64 mac_rx_fragment_pkt_num;
353 u64 mac_rx_undermin_pkt_num;
354 u64 mac_rx_jabber_pkt_num;
355 u64 mac_rx_fcs_err_pkt_num;
356 u64 mac_rx_send_app_good_pkt_num;
357 u64 mac_rx_send_app_bad_pkt_num;
/* Hardware statistics refresh interval (60 * 5 = 300; presumably
 * counted in service-timer ticks/seconds — TODO confirm at the caller).
 */
#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
361 struct hclge_hw_stats {
362 struct hclge_mac_stats mac_stats;
366 struct hclge_vlan_type_cfg {
367 u16 rx_ot_fst_vlan_type;
368 u16 rx_ot_sec_vlan_type;
369 u16 rx_in_fst_vlan_type;
370 u16 rx_in_sec_vlan_type;
376 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
377 HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
378 HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
379 HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
382 enum HCLGE_FD_KEY_TYPE {
383 HCLGE_FD_KEY_BASE_ON_PTYPE,
384 HCLGE_FD_KEY_BASE_ON_TUPLE,
387 enum HCLGE_FD_STAGE {
/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet
 * INNER_XXX indicates tuples in the tunneled header of a tunnel packet or
 * tuples of a non-tunnel packet
396 enum HCLGE_FD_TUPLE {
430 enum HCLGE_FD_META_DATA {
447 static const struct key_info meta_data_key_info[] = {
448 { PACKET_TYPE_ID, 6},
458 static const struct key_info tuple_key_info[] = {
459 { OUTER_DST_MAC, 48},
460 { OUTER_SRC_MAC, 48},
461 { OUTER_VLAN_TAG_FST, 16},
462 { OUTER_VLAN_TAG_SEC, 16},
463 { OUTER_ETH_TYPE, 16},
466 { OUTER_IP_PROTO, 8},
470 { OUTER_SRC_PORT, 16},
471 { OUTER_DST_PORT, 16},
473 { OUTER_TUN_VNI, 24},
474 { OUTER_TUN_FLOW_ID, 8},
475 { INNER_DST_MAC, 48},
476 { INNER_SRC_MAC, 48},
477 { INNER_VLAN_TAG_FST, 16},
478 { INNER_VLAN_TAG_SEC, 16},
479 { INNER_ETH_TYPE, 16},
482 { INNER_IP_PROTO, 8},
486 { INNER_SRC_PORT, 16},
487 { INNER_DST_PORT, 16},
/* Flow-director key sizing: the lookup key is up to 400 bits;
 * 400 / 8 = 50 bytes, rounded up to 13 dwords = 52 bytes total.
 */
#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
496 enum HCLGE_FD_PACKET_TYPE {
501 enum HCLGE_FD_ACTION {
502 HCLGE_FD_ACTION_ACCEPT_PACKET,
503 HCLGE_FD_ACTION_DROP_PACKET,
506 struct hclge_fd_key_cfg {
508 u8 inner_sipv6_word_en;
509 u8 inner_dipv6_word_en;
510 u8 outer_sipv6_word_en;
511 u8 outer_dipv6_word_en;
513 u32 meta_data_active;
516 struct hclge_fd_cfg {
521 u32 rule_num[2]; /* rule entry number */
522 u16 cnt_num[2]; /* rule hit counter number */
523 struct hclge_fd_key_cfg key_cfg[2];
526 struct hclge_fd_rule_tuples {
539 struct hclge_fd_rule {
540 struct hlist_node rule_node;
541 struct hclge_fd_rule_tuples tuples;
542 struct hclge_fd_rule_tuples tuples_mask;
551 struct hclge_fd_ad_data {
554 u8 forward_to_direct_queue;
559 u8 write_rule_id_to_bd;
564 /* For each bit of TCAM entry, it uses a pair of 'x' and
565 * 'y' to indicate which value to match, like below:
566 * ----------------------------------
567 * | bit x | bit y | search value |
568 * ----------------------------------
569 * | 0 | 0 | always hit |
570 * ----------------------------------
571 * | 1 | 0 | match '0' |
572 * ----------------------------------
573 * | 0 | 1 | match '1' |
574 * ----------------------------------
575 * | 1 | 1 | invalid |
576 * ----------------------------------
577 * Then for input key(k) and mask(v), we can calculate the value by
/* TCAM 'x' bit for input key k and mask v: x = ~k & v (see table above).
 * Kept as an expression macro; k and v are each expanded exactly once.
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
583 #define calc_y(y, k, v) \
585 const typeof(k) _k_ = (k); \
586 const typeof(v) _v_ = (v); \
587 (y) = (_k_ ^ ~_v_) & (_k_); \
/* Max vports tracked per VLAN in the vlan_table bitmap */
#define HCLGE_VPORT_NUM 256
592 struct pci_dev *pdev;
593 struct hnae3_ae_dev *ae_dev;
595 struct hclge_misc_vector misc_vector;
596 struct hclge_hw_stats hw_stats;
599 enum hnae3_reset_type reset_type;
600 unsigned long reset_request; /* reset has been requested */
601 unsigned long reset_pending; /* client rst is pending to be served */
603 u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
604 u16 num_tqps; /* Num task queue pairs of this PF */
605 u16 num_req_vfs; /* Num VFs requested for this PF */
607 u16 base_tqp_pid; /* Base task tqp physical id of this PF */
608 u16 alloc_rss_size; /* Allocated RSS task queue */
609 u16 rss_size_max; /* HW defined max RSS task queue */
611 u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
612 u16 num_alloc_vport; /* Num vports this driver supports */
618 enum hclge_fc_mode fc_mode_last_time;
620 #define HCLGE_FLAG_TC_BASE_SCH_MODE 1
621 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
628 struct hclge_tm_info tm_info;
633 u16 roce_base_msix_offset;
637 u16 num_roce_msi; /* Num of roce vectors for this PF */
638 int roce_base_vector;
640 u16 pending_udp_bitmap;
645 u16 adminq_work_limit; /* Num of admin receive queue desc to process */
646 unsigned long service_timer_period;
647 unsigned long service_timer_previous;
648 struct timer_list service_timer;
649 struct work_struct service_task;
650 struct work_struct rst_service_task;
651 struct work_struct mbx_service_task;
654 int num_alloc_vfs; /* Actual number of VFs allocated */
656 struct hclge_tqp *htqp;
657 struct hclge_vport *vport;
659 struct dentry *hclge_dbgfs;
661 struct hnae3_client *nic_client;
662 struct hnae3_client *roce_client;
664 #define HCLGE_FLAG_MAIN BIT(0)
665 #define HCLGE_FLAG_DCB_CAPABLE BIT(1)
666 #define HCLGE_FLAG_DCB_ENABLE BIT(2)
667 #define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
670 u32 pkt_buf_size; /* Total pf buf size for tx/rx */
671 u32 mps; /* Max packet size */
673 enum hclge_mta_dmac_sel_type mta_mac_sel_type;
674 bool enable_mta; /* Multicast filter enable */
676 struct hclge_vlan_type_cfg vlan_type_cfg;
678 unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
680 struct hclge_fd_cfg fd_cfg;
681 struct hlist_head fd_rule_list;
682 u16 hclge_fd_rule_num;
685 /* VPort level vlan tag configuration for TX direction */
686 struct hclge_tx_vtag_cfg {
687 bool accept_tag1; /* Whether accept tag1 packet from host */
688 bool accept_untag1; /* Whether accept untag1 packet from host */
691 bool insert_tag1_en; /* Whether insert inner vlan tag */
692 bool insert_tag2_en; /* Whether insert outer vlan tag */
693 u16 default_tag1; /* The default inner vlan tag to insert */
694 u16 default_tag2; /* The default outer vlan tag to insert */
697 /* VPort level vlan tag configuration for RX direction */
698 struct hclge_rx_vtag_cfg {
699 bool strip_tag1_en; /* Whether strip inner vlan tag */
700 bool strip_tag2_en; /* Whether strip outer vlan tag */
701 bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
702 bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
705 struct hclge_rss_tuple_cfg {
717 u16 alloc_tqps; /* Allocated Tx/Rx queues */
719 u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
720 /* User configured lookup table entries */
721 u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
722 int rss_algo; /* User configured hash algorithm */
723 /* User configured rss tuple sets */
724 struct hclge_rss_tuple_cfg rss_tuple_sets;
729 u16 bw_limit; /* VSI BW Limit (0 = disabled) */
732 struct hclge_tx_vtag_cfg txvlan_cfg;
733 struct hclge_rx_vtag_cfg rxvlan_cfg;
736 struct hclge_dev *back; /* Back reference to associated dev */
737 struct hnae3_handle nic;
738 struct hnae3_handle roce;
740 bool accept_mta_mc; /* whether to accept mta filter multicast */
741 unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)];
/* Fill a promiscuous-mode parameter block (unicast/multicast/broadcast
 * enables) for the given vport id.
 */
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);
/* Add/remove unicast and multicast MAC filter entries for a vport.
 * Return 0 on success or a negative error code.
 */
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
756 int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
759 int hclge_update_mta_status_common(struct hclge_vport *vport,
760 unsigned long *status,
/* Map an hnae3 handle back to its owning hclge vport. */
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
/* Bind (en == true) or unbind a ring chain to/from an interrupt vector. */
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);
770 static inline int hclge_get_queue_id(struct hnae3_queue *queue)
772 struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
/* Configure MAC speed and duplex. */
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
/* Add (is_kill == false) or remove (is_kill == true) a VLAN filter entry. */
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
/* Enable/disable hardware RX VLAN tag stripping. */
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
/* RSS hardware setup and indirection-table default configuration. */
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
/* PF <-> VF mailbox event handler. */
void hclge_mbx_handler(struct hclge_dev *hdev);
/* Reset a single task queue pair (PF or, via vport, a VF queue). */
void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);