/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>

#include "otx2_txrx.h"
#include <rvu_trace.h>

#define PCI_DEVID_OCTEONTX2_RVU_PF	0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF	0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF	0xA0F8

#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF	0xB200

#define PCI_CFG_REG_BAR_NUM		2
#define PCI_MBOX_BAR_NUM		4

enum arua_mapped_qtypes {
        AURA_NIX_RQ,
        AURA_NIX_SQ,
};

/* NIX LF interrupts range */
#define NIX_LF_QINT_VEC_START	0x00
#define NIX_LF_CINT_VEC_START	0x40
#define NIX_LF_GINT_VEC		0x80
#define NIX_LF_ERR_VEC		0x81
#define NIX_LF_POISON_VEC	0x82

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID		2000

/* RSS configuration */
struct otx2_rss_ctx {
        u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
};

struct otx2_rss_info {
#define RSS_HASH_KEY_SIZE	44 /* 352-bit key */
        u8 key[RSS_HASH_KEY_SIZE];
        struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];

/* NIX (or NPC) RX errors */
        NPC_ERRLVL_NIX = 0x0F,

enum otx2_errcodes_re {
        /* NPC_ERRLVL_RE errcodes */
        ERRCODE_FCS_RCV = 0x8,
        ERRCODE_UNDERSIZE = 0x10,
        ERRCODE_OVERSIZE = 0x11,
        ERRCODE_OL2_LEN_MISMATCH = 0x12,
        /* NPC_ERRLVL_NIX errcodes */
        ERRCODE_OL3_LEN = 0x10,
        ERRCODE_OL4_LEN = 0x11,
        ERRCODE_OL4_CSUM = 0x12,
        ERRCODE_IL3_LEN = 0x20,
        ERRCODE_IL4_LEN = 0x21,
        ERRCODE_IL4_CSUM = 0x22,

enum nix_stat_lf_tx {

enum nix_stat_lf_rx {
        RX_DRP_L3BCAST = 0xa,
        RX_DRP_L3MCAST = 0xb,

struct otx2_dev_stats {

/* Driver counted stats */
struct otx2_drv_stats {
        atomic_t rx_fcs_errs;
        atomic_t rx_oversize_errs;
        atomic_t rx_undersize_errs;
        atomic_t rx_csum_errs;
        atomic_t rx_len_errs;
        atomic_t rx_other_errs;
};

struct mbox {
        struct otx2_mbox mbox;
        struct work_struct mbox_wrk;
        struct otx2_mbox mbox_up;
        struct work_struct mbox_up_wrk;
        struct otx2_nic *pfvf;
        void *bbuf_base; /* Bounce buffer for mbox memory */
        struct mutex lock; /* serialize mailbox access */
        int num_msgs; /* mbox number of messages */
        int up_num_msgs; /* mbox_up number of messages */
};

        struct pci_dev *pdev;
        struct otx2_rss_info rss_info;

        u32 stack_pg_ptrs;  /* No. of ptrs per stack page */
        u32 stack_pg_bytes; /* Size of stack page */

        u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
        u16 matchall_ipolicer;

        /* HW settings, coalescing etc. */

        u8 cint_cnt;     /* CQ interrupt count */
        u16 npa_msixoff; /* Offset of NPA vectors */
        u16 nix_msixoff; /* Offset of NIX vectors */

        cpumask_var_t *affinity_mask;

        struct otx2_dev_stats dev_stats;
        struct otx2_drv_stats drv_stats;
        u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
        u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
        u64 cgx_fec_corr_blks;
        u64 cgx_fec_uncorr_blks;
        u8 cgx_links;  /* No. of CGX links present in HW */
        u8 lbk_links;  /* No. of LBK links present in HW */

#define CN10K_LMTST	2
        unsigned long cap_flag;

#define LMT_LINE_SIZE	128
#define LMT_BURST_SIZE	32 /* 32 LMTST lines for burst SQE flush */

struct otx2_vf_config {
        struct delayed_work link_event_work;
        bool intf_down; /* interface is down / not yet configured */

        struct work_struct work;

        struct delayed_work pool_refill_work;

        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        struct otx2_nic *nic;

        struct cyclecounter cycle_counter;
        struct timecounter time_counter;

#define OTX2_HW_TIMESTAMP_LEN	8

struct otx2_mac_table {

struct otx2_flow_config {
        u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
#define OTX2_DEFAULT_FLOWCOUNT		16
#define OTX2_MAX_UNICAST_FLOWS		8
#define OTX2_MAX_VLAN_FLOWS		1
#define OTX2_MAX_TC_FLOWS		OTX2_DEFAULT_FLOWCOUNT
#define OTX2_MCAM_COUNT			(OTX2_DEFAULT_FLOWCOUNT + \
					 OTX2_MAX_UNICAST_FLOWS + \
					 OTX2_MAX_VLAN_FLOWS)

#define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX	0
#define OTX2_VF_VLAN_TX_INDEX	1
        u16 tc_flower_offset;
        u16 ntuple_max_flows;
        u8 dmacflt_max_flows;
        u8 *bmap_to_dmacindex;
        unsigned long dmacflt_bmap;
        struct list_head flow_list;

struct otx2_tc_info {
        /* hash table to store TC offloaded flows */
        struct rhashtable flow_table;
        struct rhashtable_params flow_ht_params;
        DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS);
        unsigned long num_entries;
};

struct dev_hw_ops {
        int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
        void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
                          int size, int qidx);
        void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
        void (*aura_freeptr)(void *dev, int aura, u64 buf);
};

        void __iomem *reg_base;
        struct net_device *netdev;
        struct dev_hw_ops *hw_ops;

        u16 rbsize; /* Receive buffer size */

#define OTX2_FLAG_RX_TSTAMP_ENABLED		BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED		BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN			BIT_ULL(2)
#define OTX2_FLAG_MCAM_ENTRIES_ALLOC		BIT_ULL(3)
#define OTX2_FLAG_NTUPLE_SUPPORT		BIT_ULL(4)
#define OTX2_FLAG_UCAST_FLTR_SUPPORT		BIT_ULL(5)
#define OTX2_FLAG_RX_VLAN_SUPPORT		BIT_ULL(6)
#define OTX2_FLAG_VF_VLAN_SUPPORT		BIT_ULL(7)
#define OTX2_FLAG_PF_SHUTDOWN			BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED		BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED		BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT		BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED	BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED	BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT		BIT_ULL(14)

        struct otx2_qset qset;

        struct pci_dev *pdev;

        struct mbox *mbox_pfvf;
        struct workqueue_struct *mbox_wq;
        struct workqueue_struct *mbox_pfvf_wq;

        u16 pcifunc; /* RVU PF_FUNC */
        u16 bpid[NIX_MAX_BPID_CHAN];
        struct otx2_vf_config *vf_configs;
        struct cgx_link_user_info linfo;

        struct work_struct reset_task;
        struct workqueue_struct *flr_wq;
        struct flr_work *flr_wrk;
        struct refill_work *refill_wrk;
        struct workqueue_struct *otx2_wq;
        struct work_struct rx_mode_work;
        struct otx2_mac_table *mac_table;

        /* Block address of NIX: either BLKADDR_NIX0 or BLKADDR_NIX1 */
        int nix_blkaddr;
        /* LMTST Lines info */
        struct qmem *dync_lmt;

        struct otx2_ptp *ptp;
        struct hwtstamp_config tstamp;

        struct otx2_flow_config *flow_cfg;
        struct otx2_tc_info tc_info;
        unsigned long rq_bmap;

static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
{
        return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
}

static inline bool is_96xx_A0(struct pci_dev *pdev)
{
        return (pdev->revision == 0x00) &&
               (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

static inline bool is_96xx_B0(struct pci_dev *pdev)
{
        return (pdev->revision == 0x01) &&
               (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}

/* REVID for PCIe devices.
 * Bits 0..1: minor pass, bits 3..2: major pass
 * Bits 7..4: silicon (MIDR) ID
 */
#define PCI_REVISION_ID_96XX		0x00
#define PCI_REVISION_ID_95XX		0x10
#define PCI_REVISION_ID_LOKI		0x20
#define PCI_REVISION_ID_98XX		0x30
#define PCI_REVISION_ID_95XXMM		0x40

static inline bool is_dev_otx2(struct pci_dev *pdev)
{
        u8 midr = pdev->revision & 0xF0;

        return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
                midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
                midr == PCI_REVISION_ID_95XXMM);
}

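/* Example (illustrative): a 98XX part with pdev->revision == 0x30 has
 * midr == PCI_REVISION_ID_98XX, so is_dev_otx2() returns true; CN10K
 * parts carry an upper revision nibble outside the list above, which is
 * how otx2_setup_dev_hw_settings() below detects them.
 */
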
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
        struct otx2_hw *hw = &pfvf->hw;

        pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
        pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
        pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

        __set_bit(HW_TSO, &hw->cap_flag);

        if (is_96xx_A0(pfvf->pdev)) {
                __clear_bit(HW_TSO, &hw->cap_flag);

                /* Time based irq coalescing is not supported */
                pfvf->hw.cq_qcount_wait = 0x0;

                /* Due to a HW issue, older silicon requires a minimum of
                 * 600 unused CQEs to avoid CQ overflow.
                 */
                pfvf->hw.rq_skid = 600;
                pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
        }
        if (is_96xx_B0(pfvf->pdev))
                __clear_bit(HW_TSO, &hw->cap_flag);

        if (!is_dev_otx2(pfvf->pdev)) {
                __set_bit(CN10K_MBOX, &hw->cap_flag);
                __set_bit(CN10K_LMTST, &hw->cap_flag);
        }
}

/* Register read/write APIs */
static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
{
        u64 blkaddr;

        switch ((offset >> RVU_FUNC_BLKADDR_SHIFT) & RVU_FUNC_BLKADDR_MASK) {
        case BLKTYPE_NIX:
                blkaddr = nic->nix_blkaddr;
                break;
        case BLKTYPE_NPA:
                blkaddr = BLKADDR_NPA;
                break;
        default:
                blkaddr = BLKADDR_RVUM;
                break;
        }

        offset &= ~(RVU_FUNC_BLKADDR_MASK << RVU_FUNC_BLKADDR_SHIFT);
        offset |= (blkaddr << RVU_FUNC_BLKADDR_SHIFT);

        return nic->reg_base + offset;
}

static inline void otx2_write64(struct otx2_nic *nic, u64 offset, u64 val)
{
        void __iomem *addr = otx2_get_regaddr(nic, offset);

        writeq(val, addr);
}

static inline u64 otx2_read64(struct otx2_nic *nic, u64 offset)
{
        void __iomem *addr = otx2_get_regaddr(nic, offset);

        return readq(addr);
}

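/* Illustrative use of the register helpers above (a sketch only, assuming
 * NIX_LF_CINTX_CNT() from otx2_reg.h and a valid CQ interrupt index):
 *
 *	u64 pending = otx2_read64(pfvf, NIX_LF_CINTX_CNT(cint_idx));
 *
 * otx2_get_regaddr() rewrites the block-address bits of the offset so the
 * access is steered to the right block (NIX0/NIX1, NPA or RVUM).
 */
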
/* Mbox bounce buffer APIs */
static inline int otx2_mbox_bbuf_init(struct mbox *mbox, struct pci_dev *pdev)
{
        struct otx2_mbox *otx2_mbox;
        struct otx2_mbox_dev *mdev;

        mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
        if (!mbox->bbuf_base)
                return -ENOMEM;

        /* Overwrite mbox mbase to point to bounce buffer, so that PF/VF
         * prepare all mbox messages in bounce buffer instead of directly
         * in HW mbox memory.
         */
        otx2_mbox = &mbox->mbox;
        mdev = &otx2_mbox->dev[0];
        mdev->mbase = mbox->bbuf_base;

        otx2_mbox = &mbox->mbox_up;
        mdev = &otx2_mbox->dev[0];
        mdev->mbase = mbox->bbuf_base;

        return 0;
}

static inline void otx2_sync_mbox_bbuf(struct otx2_mbox *mbox, int devid)
{
        u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
        void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
        struct otx2_mbox_dev *mdev = &mbox->dev[devid];
        struct mbox_hdr *hdr;
        u64 msg_size;

        if (mdev->mbase == hw_mbase)
                return;

        hdr = hw_mbase + mbox->rx_start;
        msg_size = hdr->msg_size;

        if (msg_size > mbox->rx_size - msgs_offset)
                msg_size = mbox->rx_size - msgs_offset;

        /* Copy mbox messages from mbox memory to bounce buffer */
        memcpy(mdev->mbase + mbox->rx_start,
               hw_mbase + mbox->rx_start, msg_size + msgs_offset);
}

/* In the absence of an API for 128-bit I/O memory access on arm64,
 * implement the required operations in place.
 */
#if defined(CONFIG_ARM64)
static inline void otx2_write128(u64 lo, u64 hi, void __iomem *addr)
{
        __asm__ volatile("stp %x[x0], %x[x1], [%x[p1],#0]!"
                         ::[x0]"r"(lo), [x1]"r"(hi), [p1]"r"(addr));
}

static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
{
        u64 result;

        __asm__ volatile(".cpu  generic+lse\n"
                         "ldadd %x[i], %x[r], [%[b]]"
                         : [r]"=r"(result), "+m"(*ptr)
                         : [i]"r"(incr), [b]"r"(ptr)
                         : "memory");
        return result;
}
#else
#define otx2_write128(lo, hi, addr)	writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr)	({ *ptr += incr; })
#endif

static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
                                        u64 *ptrs, u64 num_ptrs,
                                        u64 *lmt_addr)
{
        u64 size = 0, count_eot = 0;
        u64 tar_addr, val = 0;

        tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
        /* LMTID is the same as the AURA ID */
        val = (aura & 0x7FF) | BIT_ULL(63);
        /* Set if [127:64] of last 128bit word has a valid pointer */
        count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
        /* Set AURA ID to free pointer */
        ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
        /* Target address for LMTST flush tells HW how many 128bit
         * words are valid from NPA_LF_AURA_BATCH_FREE0.
         *
         * tar_addr[6:4] is LMTST size-1 in units of 128b.
         */
        size = (sizeof(u64) * num_ptrs) / 16;
        if ((num_ptrs % 2) != 0)
                size++;
        tar_addr |= ((size - 1) & 0x7) << 4;

        memcpy(lmt_addr, ptrs, sizeof(u64) * num_ptrs);
        /* Perform LMTST flush */
        cn10k_lmt_flush(val, tar_addr);
}

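/* Worked example (illustrative): freeing a single buffer through
 * cn10k_aura_freeptr() below passes num_ptrs == 2 (ptrs[0] carries the
 * AURA word, ptrs[1] the buffer pointer). Then:
 *	count_eot     = 1 (ptrs[1] holds a valid pointer)
 *	size          = (8 * 2) / 16 = 1 (one 128-bit word)
 *	tar_addr[6:4] = size - 1 = 0
 * so the LMTST flush tells HW to consume one 128-bit word from
 * NPA_LF_AURA_BATCH_FREE0.
 */
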
static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
        struct otx2_nic *pfvf = dev;
        struct otx2_pool *pool;
        u64 ptrs[2];

        pool = &pfvf->qset.pool[aura];
        ptrs[1] = buf;
        __cn10k_aura_freeptr(pfvf, aura, ptrs, 2, pool->lmt_addr);
}

/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
        u64 *ptr = (u64 *)otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_ALLOCX(0));
        u64 incr = (u64)aura | BIT_ULL(63);

        return otx2_atomic64_add(incr, ptr);
}

/* Free pointer to a pool/aura */
static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
        struct otx2_nic *pfvf = dev;
        void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);

        otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}

static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
{
        if (type == AURA_NIX_SQ)
                return pfvf->hw.rqpool_cnt + idx;

        return idx;
}

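/* Pools/auras are laid out with all RQ pools first, then SQ pools; e.g.
 * with hw.rqpool_cnt == 8, SQ 0 maps to pool/aura index 8 (illustrative).
 */
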
static inline int otx2_sync_mbox_msg(struct mbox *mbox)
{
        int err;

        if (!otx2_mbox_nonempty(&mbox->mbox, 0))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox, 0);
        err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
        if (err)
                return err;

        return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
{
        int err;

        if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox_up, devid);
        err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
        if (err)
                return err;

        return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);
}

/* Use this API to send mbox msgs in atomic context
 * where sleeping is not allowed
 */
static inline int otx2_sync_mbox_msg_busy_poll(struct mbox *mbox)
{
        int err;

        if (!otx2_mbox_nonempty(&mbox->mbox, 0))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox, 0);
        err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
        if (err)
                return err;

        return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
}

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox)			\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&mbox->mbox, 0, sizeof(struct _req_type),		\
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
									\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));	\
	return req;							\
}

MBOX_MESSAGES
#undef M

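/* Illustrative use of a generated otx2_mbox_alloc_msg_*() helper together
 * with otx2_sync_mbox_msg() (a sketch only; assumes the NIX_LF_ALLOC
 * message from mbox.h and omits most request fields):
 *
 *	struct nix_lf_alloc_req *req;
 *	int err;
 *
 *	mutex_lock(&pfvf->mbox.lock);
 *	req = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
 *	if (!req) {
 *		mutex_unlock(&pfvf->mbox.lock);
 *		return -ENOMEM;
 *	}
 *	req->rq_cnt = pfvf->hw.rx_queues;
 *	err = otx2_sync_mbox_msg(&pfvf->mbox);
 *	mutex_unlock(&pfvf->mbox.lock);
 */
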
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int									\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp);

MBOX_UP_CGX_MESSAGES
#undef M

/* Time to wait before watchdog kicks off */
#define OTX2_TX_TIMEOUT		(100 * HZ)

#define RVU_PFVF_PF_SHIFT	10
#define RVU_PFVF_PF_MASK	0x3F
#define RVU_PFVF_FUNC_SHIFT	0
#define RVU_PFVF_FUNC_MASK	0x3FF

static inline int rvu_get_pf(u16 pcifunc)
{
        return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

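/* Example (illustrative): pcifunc 0x0801 has
 *	PF   = (0x0801 >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK = 2
 *	FUNC =  0x0801 & RVU_PFVF_FUNC_MASK                     = 1
 * so rvu_get_pf(0x0801) returns 2.
 */
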
static inline dma_addr_t otx2_dma_map_page(struct otx2_nic *pfvf,
                                           struct page *page,
                                           size_t offset, size_t size,
                                           enum dma_data_direction dir)
{
        dma_addr_t iova;

        iova = dma_map_page_attrs(pfvf->dev, page,
                                  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(pfvf->dev, iova)))
                return (dma_addr_t)NULL;
        return iova;
}

static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
                                       dma_addr_t addr, size_t size,
                                       enum dma_data_direction dir)
{
        dma_unmap_page_attrs(pfvf->dev, addr, size,
                             dir, DMA_ATTR_SKIP_CPU_SYNC);
}

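/* Illustrative pairing of the two DMA helpers above (a sketch; real
 * callers map receive-buffer pages of pfvf->rbsize bytes):
 *
 *	dma_addr_t iova;
 *
 *	iova = otx2_dma_map_page(pfvf, page, 0, pfvf->rbsize, DMA_FROM_DEVICE);
 *	if (!iova)
 *		return -ENOMEM;
 *	...
 *	otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
 */
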
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
int otx2_set_mac_address(struct net_device *netdev, void *p);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu);
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);

/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
int otx2_detach_resources(struct mbox *mbox);
int otx2_config_npa(struct otx2_nic *pfvf);
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf);
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf);
void otx2_aura_pool_free(struct otx2_nic *pfvf);
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		      dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);

/* RSS configuration APIs */
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);

void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp);
void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp);
void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp);
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp);
void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp);
void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp);

/* Device stats APIs */
void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
void otx2vf_set_ethtool_ops(struct net_device *netdev);

int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues);

/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
int otx2_get_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc, u32 location);
int otx2_get_all_flows(struct otx2_nic *pfvf,
		       struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
		  struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);

int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
		  void *type_data);

/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */