tools/sched_ext: Receive misc updates from SCX repo
author: Tejun Heo <tj@kernel.org>
Wed, 25 Sep 2024 22:22:37 +0000 (12:22 -1000)
committer: Tejun Heo <tj@kernel.org>
Wed, 25 Sep 2024 22:22:37 +0000 (12:22 -1000)
Receive misc tools/sched_ext updates from https://github.com/sched-ext/scx
to sync userspace bits.

- LSP macros to help language servers.

- bpf_cpumask_weight() declaration and cast_mask() helper.

- Cosmetic updates to scx_flatcg.bpf.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
tools/sched_ext/include/scx/common.bpf.h
tools/sched_ext/include/scx/user_exit_info.h
tools/sched_ext/scx_flatcg.bpf.c

index f538c75..225f61f 100644 (file)
@@ -7,7 +7,13 @@
 #ifndef __SCX_COMMON_BPF_H
 #define __SCX_COMMON_BPF_H
 
+#ifdef LSP
+#define __bpf__
+#include "../vmlinux/vmlinux.h"
+#else
 #include "vmlinux.h"
+#endif
+
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 #include <asm-generic/errno.h>
@@ -309,6 +315,15 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym
 u32 bpf_cpumask_any_distribute(const struct cpumask *cpumask) __ksym;
 u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
                                   const struct cpumask *src2) __ksym;
+u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
+
+/*
+ * Access a cpumask in read-only mode (typically to check bits).
+ */
+const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+{
+       return (const struct cpumask *)mask;
+}
 
 /* rcu */
 void bpf_rcu_read_lock(void) __ksym;
index 891693e..8ce2734 100644 (file)
@@ -25,7 +25,11 @@ struct user_exit_info {
 
 #ifdef __bpf__
 
+#ifdef LSP
+#include "../vmlinux/vmlinux.h"
+#else
 #include "vmlinux.h"
+#endif
 #include <bpf/bpf_core_read.h>
 
 #define UEI_DEFINE(__name)                                                     \
index 936415b..e272bc3 100644 (file)
@@ -225,7 +225,7 @@ static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
                                break;
 
                        /*
-                        * We can be oppotunistic here and not grab the
+                        * We can be opportunistic here and not grab the
                         * cgv_tree_lock and deal with the occasional races.
                         * However, hweight updates are already cached and
                         * relatively low-frequency. Let's just do the
@@ -258,8 +258,7 @@ static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
         * and thus can't be updated and repositioned. Instead, we collect the
         * vtime deltas separately and apply it asynchronously here.
         */
-       delta = cgc->cvtime_delta;
-       __sync_fetch_and_sub(&cgc->cvtime_delta, delta);
+       delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
        cvtime = cgv_node->cvtime + delta;
 
        /*