tools/sched_ext: scx_flatcg: fix potential stack overflow from VLA in fcg_read_stats
author: David Carlier <devnexen@gmail.com>
Sat, 14 Feb 2026 07:32:05 +0000 (07:32 +0000)
committer: Tejun Heo <tj@kernel.org>
Tue, 17 Feb 2026 07:01:18 +0000 (21:01 -1000)
fcg_read_stats() had a VLA allocating 21 * nr_cpus * 8 bytes on the
stack, risking stack overflow on large CPU counts (nr_cpus can be up
to 512).

Fix by using a single heap allocation with the correct size, reusing
it across all stat indices, and freeing it at the end.

Signed-off-by: David Carlier <devnexen@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
tools/sched_ext/scx_flatcg.c

index bea76d0..a844650 100644 (file)
@@ -102,22 +102,27 @@ static float read_cpu_util(__u64 *last_sum, __u64 *last_idle)
 
 static void fcg_read_stats(struct scx_flatcg *skel, __u64 *stats)
 {
-       __u64 cnts[FCG_NR_STATS][skel->rodata->nr_cpus];
+       __u64 *cnts;
        __u32 idx;
 
+       cnts = calloc(skel->rodata->nr_cpus, sizeof(__u64));
+       if (!cnts)
+               return;
+
        memset(stats, 0, sizeof(stats[0]) * FCG_NR_STATS);
-       memset(cnts, 0, sizeof(cnts));
 
        for (idx = 0; idx < FCG_NR_STATS; idx++) {
                int ret, cpu;
 
                ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats),
-                                         &idx, cnts[idx]);
+                                         &idx, cnts);
                if (ret < 0)
                        continue;
                for (cpu = 0; cpu < skel->rodata->nr_cpus; cpu++)
-                       stats[idx] += cnts[idx][cpu];
+                       stats[idx] += cnts[cpu];
        }
+
+       free(cnts);
 }
 
 int main(int argc, char **argv)