1 // SPDX-License-Identifier: GPL-2.0
/* Minimal userspace stand-ins for the kernel macros/types ptr_ring.h needs. */
13 #define SMP_CACHE_BYTES 64
14 #define cache_line_size() SMP_CACHE_BYTES
/* Cache-line alignment attribute, used below to pad the shared ring. */
15 #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
/* Branch-prediction hints with the usual kernel semantics. */
16 #define unlikely(x) (__builtin_expect(!!(x), 0))
17 #define likely(x) (__builtin_expect(!!(x), 1))
/* Round x up to the next multiple of a (any non-zero a, not just powers of
 * two); note both x and a are evaluated more than once. */
18 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
19 #define SIZE_MAX (~(size_t)0)
/* Kernel spinlock_t is emulated with a pthread spinlock in userspace. */
21 typedef pthread_spinlock_t spinlock_t;
/* The only gfp flag this harness carries: request zeroed memory from
 * kmalloc() (handling inside kmalloc not visible in this excerpt). */
24 #define __GFP_ZERO 0x1
/* kmalloc() shim: returns a 64-byte (cache-line) aligned allocation.
 * NOTE(review): the remainder of the body (e.g. __GFP_ZERO zeroing and the
 * return statement) is elided in this excerpt. */
26 static void *kmalloc(unsigned size, gfp_t gfp)
28 void *p = memalign(64, size);
/* kzalloc() shim: zeroed allocation, implemented by forcing __GFP_ZERO
 * onto a plain kmalloc(). */
37 static inline void *kzalloc(unsigned size, gfp_t flags)
39 return kmalloc(size, flags | __GFP_ZERO);
/* kmalloc_array() shim: allocate n elements of 'size' bytes, guarding the
 * n * size multiplication against size_t overflow first. */
42 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
44 if (size != 0 && n > SIZE_MAX / size)
/* NOTE(review): the overflow-branch failure return is elided in this excerpt. */
46 return kmalloc(n * size, flags);
/* kcalloc() shim: overflow-checked, zero-initialized array allocation. */
49 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
51 return kmalloc_array(n, size, flags | __GFP_ZERO);
/* kfree() shim; body elided in this excerpt — presumably frees p. */
54 static void kfree(void *p)
/* Initialize a (pthread-backed) spinlock; the result check on r is elided
 * in this excerpt. */
60 static void spin_lock_init(spinlock_t *lock)
62 int r = pthread_spin_init(lock, 0);
/* Acquire the spinlock; the result check on ret is elided in this excerpt. */
66 static void spin_lock(spinlock_t *lock)
68 int ret = pthread_spin_lock(lock);
/* Release the spinlock; the result check on ret is elided in this excerpt. */
72 static void spin_unlock(spinlock_t *lock)
74 int ret = pthread_spin_unlock(lock);
/* The _bh/_irq/_irqsave lock variants have no meaning in userspace;
 * NOTE(review): bodies elided in this excerpt — presumably each forwards to
 * the plain spin_lock()/spin_unlock() above. */
78 static void spin_lock_bh(spinlock_t *lock)
83 static void spin_unlock_bh(spinlock_t *lock)
88 static void spin_lock_irq(spinlock_t *lock)
93 static void spin_unlock_irq(spinlock_t *lock)
98 static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
103 static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
108 #include "../../../include/linux/ptr_ring.h"
/* Produce/consume progress counters used to fake buffer-completion tracking
 * (ptr_ring itself cannot report which buffers were consumed — see the
 * comment at get_buf). */
110 static unsigned long long headcnt, tailcnt;
/* The ring under test, cache-line aligned to avoid false sharing between
 * producer and consumer threads. */
111 static struct ptr_ring array ____cacheline_aligned_in_smp;
113 /* implemented by ring */
/* Allocate and initialize the global ptr_ring with ring_size slots; the
 * result check on ret is elided in this excerpt. */
114 void alloc_ring(void)
116 int ret = ptr_ring_init(&array, ring_size, 0);
118 /* Hacky way to poke at ring internals. Useful for testing though. */
/* Guest side: produce one buffer into the ring via __ptr_ring_produce().
 * NOTE(review): declaration of ret, the headcnt update, and the return are
 * elided in this excerpt. */
124 int add_inbuf(unsigned len, void *buf, void *datap)
128 ret = __ptr_ring_produce(&array, buf);
138 * ptr_ring API provides no way for producer to find out whether a given
139 * buffer was consumed. Our tests merely require that a successful get_buf
140 * implies that add_inbuf succeeded in the past, and that add_inbuf will
141 * succeed; fake it accordingly.
/* Guest side: fake completion of one previously-produced buffer.
 * NOTE(review): the success branch (advancing tailcnt and the return value)
 * is elided in this excerpt. */
143 void *get_buf(unsigned *lenp, void **bufp)
/* Nothing outstanding, or the ring can still accept more produce calls:
 * no completion can be faked yet. */
147 if (tailcnt == headcnt || __ptr_ring_full(&array))
/* NOTE(review): the enclosing function signature is elided in this excerpt;
 * this expression mirrors the get_buf() empty test above — true when no
 * completion can currently be faked. */
159 return (tailcnt == headcnt || __ptr_ring_full(&array));
/* Guest -> host notification hook; body elided in this excerpt. */
172 void kick_available(void)
/* NOTE(review): the enclosing function signature is elided in this excerpt;
 * the expression is true when a non-destructive peek finds no buffer
 * available to consume. */
190 return !__ptr_ring_peek(&array);
/* Host side: consume one buffer from the ring (NULL when empty).
 * NOTE(review): declaration of ptr and the return statement are elided in
 * this excerpt. */
193 bool use_buf(unsigned *lenp, void **bufp)
197 ptr = __ptr_ring_consume(&array);