#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H
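+/*
+ * When edited under a language server (LSP defined, e.g. in a clangd
+ * configuration), use the checked-in copy of vmlinux.h and define __bpf__
+ * by hand; a real build generates vmlinux.h and gets __bpf__ from clang's
+ * BPF target.
+ */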
+#ifdef LSP
+#define __bpf__
+#include "../vmlinux/vmlinux.h"
+#else
#include "vmlinux.h"
+#endif
+
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
				   const struct cpumask *src2) __ksym;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
+
+/*
+ * Access a cpumask in read-only mode (typically to check bits).
+ */
+static inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+{
+	return (const struct cpumask *)mask;
+}
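/*
 * Usage sketch (illustrative, not part of this header): a bpf_cpumask
 * allocated with bpf_cpumask_create() must be passed through cast_mask()
 * before read-only kfuncs such as bpf_cpumask_weight() will accept it.
 * bpf_cpumask_create()/_set_cpu()/_release() are assumed to be declared
 * elsewhere in this header; example_weight_of_cpu0() is a made-up name.
 */
static inline u32 example_weight_of_cpu0(void)
{
	struct bpf_cpumask *mask = bpf_cpumask_create();
	u32 weight;

	if (!mask)
		return 0;
	bpf_cpumask_set_cpu(0, mask);	/* mutate via the writable type */
	weight = bpf_cpumask_weight(cast_mask(mask)); /* read via const cast */
	bpf_cpumask_release(mask);
	return weight;
}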
/* rcu */
void bpf_rcu_read_lock(void) __ksym;
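/*
 * Usage sketch (illustrative): RCU read-side critical sections bracket
 * accesses to RCU-protected kernel data. bpf_rcu_read_unlock() is assumed
 * to be declared alongside the lock above; example_rcu_peek() is made up.
 */
static inline void example_rcu_peek(struct task_struct *p)
{
	bpf_rcu_read_lock();
	/* RCU-protected fields of @p may be read safely in this window */
	bpf_rcu_read_unlock();
}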
#ifdef __bpf__
+#ifdef LSP
+#include "../vmlinux/vmlinux.h"
+#else
#include "vmlinux.h"
+#endif
#include <bpf/bpf_core_read.h>
#define UEI_DEFINE(__name) \
break;
/*
- * We can be oppotunistic here and not grab the
+ * We can be opportunistic here and not grab the
* cgv_tree_lock and deal with the occasional races.
* However, hweight updates are already cached and
* relatively low-frequency. Let's just do the
* and thus can't be updated and repositioned. Instead, we collect the
* vtime deltas separately and apply them asynchronously here.
*/
- delta = cgc->cvtime_delta;
- __sync_fetch_and_sub(&cgc->cvtime_delta, delta);
+ delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
cvtime = cgv_node->cvtime + delta;
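/*
 * Illustrative note (not part of the patch): the change above folds the
 * plain load and the atomic subtract into one statement, so @delta is the
 * value returned by the atomic itself rather than a separate read. A
 * generic sketch of this read-and-clear pattern, with a made-up helper:
 */
static inline u64 consume_acc(u64 *acc)
{
	/*
	 * The subtrahend is loaded before the atomic op; an addition landing
	 * in that window is left in *acc and also shows up in the returned
	 * old value, so callers must tolerate this occasional small race.
	 */
	return __sync_fetch_and_sub(acc, *acc);
}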
/*