#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+/* Use these barrier functions instead of smp_[rw]mb() when the
+ * barriers appear in a libbpf header file. That way they can be
+ * compiled directly into the application that uses libbpf.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+# define libbpf_smp_rmb() asm volatile("" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("" : : : "memory")
+# if defined(__x86_64__)
+#  define libbpf_smp_mb() \
+	asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
+# else /* __i386__: %rsp does not exist in 32-bit mode */
+#  define libbpf_smp_mb() \
+	asm volatile("lock; addl $0,0(%%esp)" : : : "memory", "cc")
+# endif
+#elif defined(__aarch64__)
+# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+#elif defined(__arm__)
+/* dmb is only valid on ARMv7 and above. ARMv7 has no load-load-only
+ * option, so a full "dmb ish" doubles as the read barrier here.
+ */
+# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+#else
+# warning Architecture missing native barrier functions in libbpf_util.h.
+# define libbpf_smp_rmb() __sync_synchronize()
+# define libbpf_smp_wmb() __sync_synchronize()
+# define libbpf_smp_mb() __sync_synchronize()
+#endif
+
#ifdef __cplusplus
} /* extern "C" */
#endif
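
For context, here is a minimal sketch (hypothetical names, not part of this patch) of the single-producer/single-consumer pattern these barriers pair up in:

static int data;
static volatile unsigned int ready;

static void producer(void)
{
	data = 42;		/* write the payload first */
	libbpf_smp_wmb();	/* payload store must be visible before the flag */
	ready = 1;
}

static int consumer(void)
{
	while (!ready)
		;		/* wait for the flag */
	libbpf_smp_rmb();	/* flag load must complete before the payload load */
	return data;
}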
#include <linux/if_xdp.h>
#include "libbpf.h"
+#include "libbpf_util.h"
#ifdef __cplusplus
extern "C" {
/* Make sure everything has been written to the ring before indicating
* this to the kernel by writing the producer pointer.
*/
- smp_wmb();
+ libbpf_smp_wmb();
*prod->producer += nb;
}
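
For illustration, a sketch of the producer path built on this helper, assuming the xsk_ring_prod__reserve() and xsk_ring_prod__tx_desc() accessors defined elsewhere in this header (error handling trimmed):

static void tx_one(struct xsk_ring_prod *tx, __u64 addr, __u32 len)
{
	__u32 idx;

	if (xsk_ring_prod__reserve(tx, 1, &idx) != 1)
		return;	/* ring full */

	xsk_ring_prod__tx_desc(tx, idx)->addr = addr;
	xsk_ring_prod__tx_desc(tx, idx)->len = len;

	/* submit() issues libbpf_smp_wmb() before bumping the producer
	 * pointer, so the descriptor writes above become visible first.
	 */
	xsk_ring_prod__submit(tx, 1);
}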
/* Make sure we do not speculatively read the data before
* we have received the packet buffers from the ring.
*/
- smp_rmb();
+ libbpf_smp_rmb();
*idx = cons->cached_cons;
cons->cached_cons += entries;
/* Make sure data has been read before indicating we are done
* with the entries by updating the consumer pointer.
*/
- smp_mb();
+ libbpf_smp_mb();
*cons->consumer += nb;
}
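
And the matching consumer path, a sketch assuming the xsk_ring_cons__peek() and xsk_ring_cons__rx_desc() accessors from this header (process_frame() is a hypothetical handler):

static void rx_batch(struct xsk_ring_cons *rx)
{
	__u32 idx;
	size_t i, rcvd = xsk_ring_cons__peek(rx, 16, &idx);

	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx + i);

		process_frame(desc->addr, desc->len);
	}

	/* release() issues libbpf_smp_mb() before bumping the consumer
	 * pointer, so the reads above complete before the kernel may
	 * recycle these entries.
	 */
	if (rcvd)
		xsk_ring_cons__release(rx, rcvd);
}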