/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

#define RX_BATCH_SIZE 16
#define LAZY_UPDATE_THRESHOLD 128

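/* The fill, completion, Rx and Tx rings are single-producer/single-consumer
 * queues shared with user space. Keeping the producer and consumer indices
 * on separate cache lines avoids false sharing between the two sides.
 */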
struct xdp_ring {
        u32 producer ____cacheline_aligned_in_smp;
        u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
        struct xdp_ring ptrs;
        struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
        struct xdp_ring ptrs;
        u64 desc[0] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
        struct xdp_umem_props umem_props;
        u32 ring_mask;
        u32 nentries;
        u32 prod_head;
        u32 prod_tail;
        u32 cons_head;
        u32 cons_tail;
        struct xdp_ring *ring;
        u64 invalid_descs;
};

/* Common functions operating for both RXTX and umem queues */

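/* Each queue keeps private copies of the shared ring indices:
 * prod_head/prod_tail on the producing side and cons_head/cons_tail on the
 * consuming side. The helpers below operate on these cached values and only
 * touch the shared ring->producer/ring->consumer words (via READ_ONCE() and
 * WRITE_ONCE()) when the cached view runs short or new entries are
 * published.
 */
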
static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}

static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
        u32 entries = q->prod_tail - q->cons_tail;

        if (entries == 0) {
                /* Refresh the local pointer */
                q->prod_tail = READ_ONCE(q->ring->producer);
                entries = q->prod_tail - q->cons_tail;
        }

        return (entries > dcnt) ? dcnt : entries;
}

static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
{
        return q->nentries - (producer - q->cons_tail);
}

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
        u32 free_entries = xskq_nb_free_lazy(q, producer);

        if (free_entries >= dcnt)
                return free_entries;

        /* Refresh the local tail pointer */
        q->cons_tail = READ_ONCE(q->ring->consumer);
        return q->nentries - (producer - q->cons_tail);
}

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
        if (addr >= q->umem_props.size) {
                q->invalid_descs++;
                return false;
        }

        return true;
}

static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
                if (xskq_is_valid_addr(q, *addr))
                        return addr;

                q->cons_tail++;
        }

        return NULL;
}

static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb();
        }

        return xskq_validate_addr(q, addr);
}

static inline void xskq_discard_addr(struct xsk_queue *q)
{
        q->cons_tail++;
}

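/* A minimal sketch (illustrative only, not taken from the kernel sources) of
 * how a caller might drain addresses from a fill queue with the helpers
 * above:
 *
 *      u64 addr;
 *
 *      while (xskq_peek_addr(q, &addr)) {
 *              ... consume the umem buffer at 'addr' ...
 *              xskq_discard_addr(q);
 *      }
 *
 * The shared consumer pointer is only published on the next peek that
 * exhausts the cached batch, so consumption is batched in units of
 * RX_BATCH_SIZE.
 */
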
static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0)
                return -ENOSPC;

        ring->desc[q->prod_tail++ & q->ring_mask] = addr;

        /* Order producer and data */
        smp_wmb();

        WRITE_ONCE(q->ring->producer, q->prod_tail);
        return 0;
}

static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
                return -ENOSPC;

        ring->desc[q->prod_head++ & q->ring_mask] = addr;
        return 0;
}

static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
                                             u32 nb_entries)
{
        /* Order producer and data */
        smp_wmb();

        q->prod_tail += nb_entries;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
}

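/* A sketch (not taken from the kernel sources; to_complete and addrs[] are
 * placeholders) of the batched completion pattern these helpers enable:
 * stage entries with xskq_produce_addr_lazy() and publish them with a single
 * flush:
 *
 *      u32 n;
 *
 *      for (n = 0; n < to_complete; n++)
 *              if (xskq_produce_addr_lazy(q, addrs[n]))
 *                      break;
 *      if (n)
 *              xskq_produce_flush_addr_n(q, n);
 *
 * Only the flush issues the write barrier and advances the shared producer
 * pointer; the per-entry cost stays on the local prod_head.
 */
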
static inline int xskq_reserve_addr(struct xsk_queue *q)
{
        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        q->prod_head++;
        return 0;
}

/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
        if (!xskq_is_valid_addr(q, d->addr))
                return false;

        if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
            (d->addr & q->umem_props.chunk_mask)) {
                q->invalid_descs++;
                return false;
        }

        return true;
}

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
                                                  struct xdp_desc *desc)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                *desc = READ_ONCE(ring->desc[idx]);
                if (xskq_is_valid_desc(q, desc))
                        return desc;

                q->cons_tail++;
        }

        return NULL;
}

static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
                                              struct xdp_desc *desc)
{
        if (q->cons_tail == q->cons_head) {
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb();
        }

        return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
        q->cons_tail++;
}

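/* Illustrative only (not kernel code): a Tx path would typically walk the
 * user-filled Tx ring with these two helpers:
 *
 *      struct xdp_desc desc;
 *
 *      while (xskq_peek_desc(q, &desc)) {
 *              ... send desc.len bytes starting at umem address desc.addr ...
 *              xskq_discard_desc(q);
 *      }
 *
 * Invalid descriptors are skipped by xskq_validate_desc() and counted in
 * invalid_descs rather than returned to the caller.
 */
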
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
                                          u64 addr, u32 len)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        unsigned int idx;

        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        idx = (q->prod_head++) & q->ring_mask;
        ring->desc[idx].addr = addr;
        ring->desc[idx].len = len;

        return 0;
}

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
        /* Order producer and data */
        smp_wmb();

        q->prod_tail = q->prod_head;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
}

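/* A sketch, not taken from the kernel sources ('addr' and 'len' stand in for
 * per-frame values): the Rx path can stage several received frames and
 * publish them to user space in one go:
 *
 *      for each received frame
 *              if (xskq_produce_batch_desc(q, addr, len))
 *                      break;
 *      xskq_produce_flush_desc(q);
 *
 * As with the address ring, only the flush orders the descriptor writes
 * (smp_wmb()) and moves the shared producer pointer forward.
 */
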
static inline bool xskq_full_desc(struct xsk_queue *q)
{
        return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
        return xskq_nb_free(q, q->prod_tail, 1) == q->nentries;
}

void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */