/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>
struct xdp_ring {
        u32 producer ____cacheline_aligned_in_smp;
        u32 consumer ____cacheline_aligned_in_smp;
        u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
        struct xdp_ring ptrs;
        struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
        struct xdp_ring ptrs;
        u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
        u32 ring_mask;
        u32 nentries;
        u32 cached_prod;
        u32 cached_cons;
        struct xdp_ring *ring;
        u64 invalid_descs;
        u64 queue_empty_descs;
};
/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. If we did not have this
 * barrier, some architectures could load old data, as speculative loads
 * are not discarded since the CPU does not know there is a dependency
 * between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates that there is no
 * room in the buffer to store $data, we do not store it. So no barrier
 * is needed.
 *
 * (D) protects the load of the data from being observed to happen
 * after the store of the consumer pointer. If we did not have this
 * memory barrier, the producer could observe the consumer pointer
 * being set and overwrite the data with a new value before the
 * consumer got the chance to read the old value. The consumer would
 * thus miss reading the old entry and very likely read the new entry
 * twice, once right now and again after circling through the ring.
 */
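/* A minimal illustrative sketch (not part of this header's API; the
 * xskq_sketch_* names are hypothetical) of the barrier scheme above,
 * applied to a single-producer/single-consumer xdp_umem_ring with
 * power-of-two size 'size':
 */
static inline bool xskq_sketch_produce(struct xdp_umem_ring *r, u32 size,
                                       u64 val)
{
        u32 prod = READ_ONCE(r->ptrs.producer);

        /* A: control dependency; bail out if there is no room */
        if (prod - READ_ONCE(r->ptrs.consumer) == size)
                return false;

        r->desc[prod & (size - 1)] = val;       /* STORE $data */
        smp_wmb();                              /* B, matches C */
        WRITE_ONCE(r->ptrs.producer, prod + 1); /* STORE ->producer */
        return true;
}

static inline bool xskq_sketch_consume(struct xdp_umem_ring *r, u32 size,
                                       u64 *val)
{
        u32 cons = READ_ONCE(r->ptrs.consumer);

        if (READ_ONCE(r->ptrs.producer) == cons) /* LOAD ->producer */
                return false;

        smp_rmb();                              /* C, matches B */
        *val = r->desc[cons & (size - 1)];      /* LOAD $data */
        smp_mb();                               /* D, matches A */
        WRITE_ONCE(r->ptrs.consumer, cons + 1); /* STORE ->consumer */
        return true;
}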
/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and, when it is done reading them, release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
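/* As a rough mapping onto the helpers defined below: RESERVE and WRITE
 * are combined in xskq_prod_reserve_addr()/xskq_prod_reserve_desc(),
 * and SUBMIT is xskq_prod_submit(); PEEK is
 * xskq_cons_peek_addr_unchecked()/xskq_cons_peek_desc(), READ happens
 * as part of the peek via the xskq_cons_read_*() helpers, and RELEASE
 * is xskq_cons_release(). Illustrative usage sketches follow
 * xskq_cons_release() and xskq_prod_submit() below.
 */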
/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (q->cached_cons != q->cached_prod) {
                u32 idx = q->cached_cons & q->ring_mask;

                *addr = ring->desc[idx];
                return true;
        }

        return false;
}
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
                                            struct xdp_desc *desc)
{
        u64 chunk, chunk_end;

        chunk = xp_aligned_extract_addr(pool, desc->addr);
        chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
        if (chunk != chunk_end)
                return false;

        if (chunk >= pool->addrs_cnt)
                return false;

        if (desc->options)
                return false;
        return true;
}
static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
                                              struct xdp_desc *desc)
{
        u64 addr, base_addr;

        base_addr = xp_unaligned_extract_addr(desc->addr);
        addr = xp_unaligned_add_offset_to_addr(desc->addr);

        if (desc->len > pool->chunk_size)
                return false;

        if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
            xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
                return false;

        if (desc->options)
                return false;
        return true;
}
static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
                                    struct xdp_desc *desc)
{
        return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
               xp_aligned_validate_desc(pool, desc);
}
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
                                           struct xdp_desc *d,
                                           struct xdp_umem *umem)
{
        if (!xp_validate_desc(umem->pool, d)) {
                q->invalid_descs++;
                return false;
        }
        return true;
}
static inline bool xskq_cons_read_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xdp_umem *umem)
{
        while (q->cached_cons != q->cached_prod) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                u32 idx = q->cached_cons & q->ring_mask;

                *desc = ring->desc[idx];
                if (xskq_cons_is_valid_desc(q, desc, umem))
                        return true;

                /* Skip the invalid descriptor and try the next one */
                q->cached_cons++;
        }

        return false;
}
/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
        smp_mb(); /* D, matches A */
        WRITE_ONCE(q->ring->consumer, q->cached_cons);
}
static inline void __xskq_cons_peek(struct xsk_queue *q)
{
        /* Refresh the local pointer */
        q->cached_prod = READ_ONCE(q->ring->producer);
        smp_rmb(); /* C, matches B */
}
static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
        __xskq_cons_release(q);
        __xskq_cons_peek(q);
}
static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
        u32 entries = q->cached_prod - q->cached_cons;

        if (entries >= cnt)
                return true;

        __xskq_cons_peek(q);
        entries = q->cached_prod - q->cached_cons;

        return entries >= cnt;
}
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_addr_unchecked(q, addr);
}
static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xdp_umem *umem)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_desc(q, desc, umem);
}
static inline void xskq_cons_release(struct xsk_queue *q)
{
        /* To improve performance, only update local state here.
         * Reflect this to global state when we get new entries
         * from the ring in xskq_cons_get_entries() and whenever
         * Rx or Tx processing is completed in the NAPI loop.
         */
        q->cached_cons++;
}
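/* Illustrative sketch (hypothetical function, not used by the kernel):
 * a consumer-side loop that peeks at up to 'max' addresses from a fill
 * ring, records them, releases the entries locally, and publishes
 * ->consumer once at the end, as a completion path would.
 */
static inline u32 xskq_sketch_cons_drain(struct xsk_queue *q, u64 *addrs,
                                         u32 max)
{
        u64 addr;
        u32 n = 0;

        while (n < max && xskq_cons_peek_addr_unchecked(q, &addr)) {
                addrs[n++] = addr;
                xskq_cons_release(q);   /* local state only */
        }

        /* D, matches A: make the released entries visible to the producer */
        __xskq_cons_release(q);
        return n;
}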
static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
                q->nentries;
}
/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
        u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        if (free_entries)
                return false;

        /* Refresh the local tail pointer */
        q->cached_cons = READ_ONCE(q->ring->consumer);
        free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        return !free_entries;
}
static inline int xskq_prod_reserve(struct xsk_queue *q)
{
        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        q->cached_prod++;
        return 0;
}
static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        ring->desc[q->cached_prod++ & q->ring_mask] = addr;
        return 0;
}
static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
                                         u64 addr, u32 len)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        u32 idx;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        idx = q->cached_prod++ & q->ring_mask;
        ring->desc[idx].addr = addr;
        ring->desc[idx].len = len;

        return 0;
}
static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
        smp_wmb(); /* B, matches C */

        WRITE_ONCE(q->ring->producer, idx);
}
static inline void xskq_prod_submit(struct xsk_queue *q)
{
        __xskq_prod_submit(q, q->cached_prod);
}
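/* Illustrative sketch (hypothetical function, not used by the kernel):
 * a producer-side path that reserves and writes a batch of addresses
 * into a ring and then publishes them with a single submit.
 */
static inline u32 xskq_sketch_prod_push(struct xsk_queue *q,
                                        const u64 *addrs, u32 cnt)
{
        u32 n;

        for (n = 0; n < cnt; n++) {
                /* Reserve a slot and write the address into it */
                if (xskq_prod_reserve_addr(q, addrs[n]))
                        break;  /* ring full (-ENOSPC) */
        }

        /* B, matches C: order the data writes before the ->producer update */
        xskq_prod_submit(q);
        return n;
}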
static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
        u32 idx = q->ring->producer;

        ring->desc[idx++ & q->ring_mask] = addr;

        __xskq_prod_submit(q, idx);
}
static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
        __xskq_prod_submit(q, q->ring->producer + nb_entries);
}
static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
        return q ? q->queue_empty_descs : 0;
}
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */