/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

#define RX_BATCH_SIZE 16
#define LAZY_UPDATE_THRESHOLD 128
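
/* RX_BATCH_SIZE bounds how many new entries a consumer reserves per
 * refresh in xskq_peek_addr()/xskq_peek_desc(); LAZY_UPDATE_THRESHOLD
 * is the free-space headroom xskq_produce_addr_lazy() requires before
 * queueing an entry without publishing the producer pointer.
 */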

struct xdp_ring {
        u32 producer ____cacheline_aligned_in_smp;
        u32 consumer ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
        struct xdp_ring ptrs;
        struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
        struct xdp_ring ptrs;
        u64 desc[0] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
        u64 chunk_mask;
        u64 size;
        u32 ring_mask;
        u32 nentries;
        u32 prod_head;
        u32 prod_tail;
        u32 cons_head;
        u32 cons_tail;
        struct xdp_ring *ring;
        u64 invalid_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it prevents the data store from being reordered
 * after the store of the producer pointer. If this barrier was missing,
 * the consumer could observe the producer pointer being set and thus
 * load the data before the producer has written the new data. The
 * consumer would in this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. So no barrier
 * is needed.
 *
 * (D) prevents the load of the data from being reordered after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
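
/* As an informal guide to the helpers below: in xskq_produce_addr() the
 * xskq_nb_free() check acts as (A), smp_wmb() is (B), and the
 * WRITE_ONCE() of ->producer publishes the entry; in xskq_peek_addr()
 * and xskq_peek_desc(), smp_rmb() is (C) and smp_mb() followed by the
 * WRITE_ONCE() of ->consumer forms (D).
 */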

/* Common functions operating for both RXTX and umem queues */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}
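
/* The helpers below work on cached copies of the shared producer and
 * consumer pointers (q->prod_tail and q->cons_tail) and only re-read
 * the shared cacheline with READ_ONCE() when the cached view runs out;
 * that is what "refresh" means in the comments.
 */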

static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
{
        u32 entries = q->prod_tail - q->cons_tail;

        if (entries == 0) {
                /* Refresh the local pointer */
                q->prod_tail = READ_ONCE(q->ring->producer);
                entries = q->prod_tail - q->cons_tail;
        }

        return (entries > dcnt) ? dcnt : entries;
}
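
/* Note that prod_tail - cons_tail is computed on free-running u32
 * indices, so the result stays correct across wraparound as long as
 * nentries is a power of two and ring_mask == nentries - 1.
 */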

static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
        u32 free_entries = q->nentries - (producer - q->cons_tail);

        if (free_entries >= dcnt)
                return free_entries;

        /* Refresh the local tail pointer */
        q->cons_tail = READ_ONCE(q->ring->consumer);
        return q->nentries - (producer - q->cons_tail);
}

static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)
{
        u32 entries = q->prod_tail - q->cons_tail;

        if (entries >= cnt)
                return true;

        /* Refresh the local pointer. */
        q->prod_tail = READ_ONCE(q->ring->producer);
        entries = q->prod_tail - q->cons_tail;

        return entries >= cnt;
}

/* UMEM queue */

static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
        if (addr >= q->size) {
                q->invalid_descs++;
                return false;
        }

        return true;
}

static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
                if (xskq_is_valid_addr(q, *addr))
                        return addr;

                q->cons_tail++;
        }

        return NULL;
}
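
/* Invalid entries are skipped and counted in invalid_descs rather than
 * failing the peek, so a buggy or malicious user-space producer cannot
 * stall the kernel side of the ring.
 */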

static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
        if (q->cons_tail == q->cons_head) {
                smp_mb(); /* D, matches A */
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb();
        }

        return xskq_validate_addr(q, addr);
}
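
/* Usage sketch (hypothetical caller, assuming a populated fill queue
 * fq):
 *
 *      u64 addr;
 *
 *      while (xskq_peek_addr(fq, &addr)) {
 *              ...consume the validated, chunk-masked address...
 *              xskq_discard_addr(fq);
 *      }
 */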

static inline void xskq_discard_addr(struct xsk_queue *q)
{
        q->cons_tail++;
}

static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_nb_free(q, q->prod_tail, 1) == 0)
                return -ENOSPC;

        /* A, matches D */
        ring->desc[q->prod_tail++ & q->ring_mask] = addr;

        /* Order producer and data */
        smp_wmb(); /* B, matches C */

        WRITE_ONCE(q->ring->producer, q->prod_tail);
        return 0;
}

static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
                return -ENOSPC;

        /* A, matches D */
        ring->desc[q->prod_head++ & q->ring_mask] = addr;
        return 0;
}
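
/* A lazy producer queues entries on prod_head only; nothing becomes
 * visible to the consumer until xskq_produce_flush_addr_n() below
 * advances prod_tail and publishes ->producer, so batching amortizes
 * both the barrier and the shared-cacheline store.
 */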

static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
                                             u32 nb_entries)
{
        /* Order producer and data */
        smp_wmb(); /* B, matches C */

        q->prod_tail += nb_entries;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline int xskq_reserve_addr(struct xsk_queue *q)
{
        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        /* A, matches D */
        q->prod_head++;
        return 0;
}

/* Rx/Tx queue */

static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
        if (!xskq_is_valid_addr(q, d->addr))
                return false;

        if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
            d->options) {
                q->invalid_descs++;
                return false;
        }

        return true;
}
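
/* The second check above rejects descriptors whose [addr, addr + len)
 * range leaves the chunk that addr belongs to (the masked chunk base
 * changes), as well as descriptors with any options bits set.
 */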

static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
                                                  struct xdp_desc *desc)
{
        while (q->cons_tail != q->cons_head) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                unsigned int idx = q->cons_tail & q->ring_mask;

                *desc = READ_ONCE(ring->desc[idx]);
                if (xskq_is_valid_desc(q, desc))
                        return desc;

                q->cons_tail++;
        }

        return NULL;
}

static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
                                              struct xdp_desc *desc)
{
        if (q->cons_tail == q->cons_head) {
                smp_mb(); /* D, matches A */
                WRITE_ONCE(q->ring->consumer, q->cons_tail);
                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);

                /* Order consumer and data */
                smp_rmb(); /* C, matches B */
        }

        return xskq_validate_desc(q, desc);
}

static inline void xskq_discard_desc(struct xsk_queue *q)
{
        q->cons_tail++;
}

static inline int xskq_produce_batch_desc(struct xsk_queue *q,
                                          u64 addr, u32 len)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        unsigned int idx;

        if (xskq_nb_free(q, q->prod_head, 1) == 0)
                return -ENOSPC;

        /* A, matches D */
        idx = (q->prod_head++) & q->ring_mask;
        ring->desc[idx].addr = addr;
        ring->desc[idx].len = len;

        return 0;
}
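
/* Usage sketch (hypothetical Rx path, assuming a queue rxq and
 * per-packet addr/len values):
 *
 *      if (xskq_produce_batch_desc(rxq, addr, len) == 0) {
 *              ...queue further descriptors as they arrive...
 *              xskq_produce_flush_desc(rxq);
 *      }
 *
 * Only the flush below publishes the batch to the consumer.
 */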

static inline void xskq_produce_flush_desc(struct xsk_queue *q)
{
        /* Order producer and data */
        smp_wmb(); /* B, matches C */

        q->prod_tail = q->prod_head;
        WRITE_ONCE(q->ring->producer, q->prod_tail);
}

static inline bool xskq_full_desc(struct xsk_queue *q)
{
        return xskq_nb_avail(q, q->nentries) == q->nentries;
}

static inline bool xskq_empty_desc(struct xsk_queue *q)
{
        return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
}
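
/* Both predicates above compare against nentries: xskq_full_desc() sees
 * the ring as full when nentries entries are available to consume, and
 * xskq_empty_desc() sees it as empty when all nentries slots are free.
 */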

void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

/* Executed by the core when the entire UMEM gets freed */
void xsk_reuseq_destroy(struct xdp_umem *umem);

#endif /* _LINUX_XSK_QUEUE_H */