/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#ifndef OTX2_TXRX_H
#define OTX2_TXRX_H

#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#define LBK_CHAN_BASE	0x000
#define SDP_CHAN_BASE	0x700
#define CGX_CHAN_BASE	0x800

#define OTX2_DATA_ALIGN(X)	ALIGN(X, OTX2_ALIGN)
#define OTX2_HEAD_ROOM		OTX2_ALIGN

#define OTX2_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU		64

#define OTX2_MAX_GSO_SEGS	255
#define OTX2_MAX_FRAGS_IN_SQE	9
/* Rx buffer size should be in multiples of 128 bytes */
#define RCV_FRAG_LEN1(x)				\
		((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
		OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* Prefer 2048 byte buffers for better last level cache
 * utilization or data distribution across regions.
 */
#define RCV_FRAG_LEN(x)	\
		((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
#define DMA_BUFFER_LEN(x)	\
		((x) - OTX2_HEAD_ROOM - \
		OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
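
/* Worked example of the macros above (a sketch: assumes OTX2_ALIGN is 128
 * and sizeof(struct skb_shared_info) is 320, as on a typical 64-bit build):
 *
 *   RCV_FRAG_LEN1(1536)  = 128 + 1536 + 384 = 2048
 *   RCV_FRAG_LEN(1536)   = 2048 (already >= 2048, so no bump)
 *   DMA_BUFFER_LEN(2048) = 2048 - 128 - 384 = 1536
 *
 * i.e. DMA_BUFFER_LEN() recovers the usable payload size from a buffer
 * sized by RCV_FRAG_LEN().
 */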
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is equal to this value.
 */
#define CQ_CQE_THRESH_DEFAULT	10

/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
 * is nonzero and this much time elapses after that.
 */
#define CQ_TIMER_THRESH_DEFAULT	1  /* 1 usec */
#define CQ_TIMER_THRESH_MAX	25 /* 25 usec */
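
/* With the defaults above, the completion IRQ fires once 10 CQEs have
 * accumulated, or 1 usec after the first CQE when fewer than 10 arrive
 * in that window.
 */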
/* Min number of CQs (of the ones mapped to this CINT)
 * with valid CQEs.
 */
#define CQ_QCOUNT_DEFAULT	1
struct queue_stats {
	u64	bytes;
	u64	pkts;
};

struct otx2_rcv_queue {
	struct queue_stats	stats;
};

/* Scatter/gather bookkeeping for one in-flight SQE */
struct sg_list {
	u16	num_segs;
	u64	skb;
	u64	size[OTX2_MAX_FRAGS_IN_SQE];
	u64	dma_addr[OTX2_MAX_FRAGS_IN_SQE];
};

struct otx2_snd_queue {
	struct qmem		*tso_hdrs;
	struct sg_list		*sg;
	struct qmem		*timestamps;
	struct queue_stats	stats;
} ____cacheline_aligned_in_smp;
enum cq_type {
	CQ_RX,
	CQ_TX,
	CQS_PER_CINT = 2, /* RQ + SQ */
};
struct otx2_cq_poll {
	void			*dev;
#define CINT_INVALID_CQ		255
	u8			cint_idx;
	u8			cq_ids[CQS_PER_CINT];
	struct napi_struct	napi;
};

struct otx2_pool {
	struct qmem		*stack;
	struct qmem		*fc_addr;
	u16			rbsize;
};
struct otx2_cq_queue {
	u8			cint_idx; /* CQ interrupt id */
	u8			refill_task_sched;
	struct otx2_pool	*rbpool;
} ____cacheline_aligned_in_smp;
struct otx2_qset {
	u32			rqe_cnt;
	u32			sqe_cnt; /* Keep these two at top */
#define OTX2_MAX_CQ_CNT		64
	u16			cq_cnt;
	u16			xqe_size;
	struct otx2_pool	*pool;
	struct otx2_cq_poll	*napi;
	struct otx2_cq_queue	*cq;
	struct otx2_snd_queue	*sq;
	struct otx2_rcv_queue	*rq;
};
/* Translate IOVA to physical address */
static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
{
	/* Translation is installed only when IOMMU is present */
	if (likely(iommu_domain))
		return iommu_iova_to_phys(iommu_domain, dma_addr);
	return dma_addr;
}
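
/* Usage sketch (hypothetical helper, not part of the upstream header):
 * recover the kernel virtual address of a buffer from the IOVA carried in
 * a CQE, by translating IOVA -> physical, then physical -> virtual.
 * Assumes the buffer lives in lowmem so phys_to_virt() is valid for it,
 * and that <linux/io.h> is included for phys_to_virt().
 */
static inline void *otx2_iova_to_virt(void *iommu_domain, dma_addr_t iova)
{
	return phys_to_virt(otx2_iova_to_phys(iommu_domain, iova));
}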
int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
			struct sk_buff *skb, u16 qidx);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		     int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
		    int size, int qidx);
void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
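
/* The otx2_*()/cn10k_*() pairs above are per-silicon implementations of
 * the same operations; the driver selects one set at probe time through
 * hardware-specific function pointers rather than branching per packet.
 */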
#endif /* OTX2_TXRX_H */