1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2018 Facebook
11 #include <linux/perf_event.h>
12 #include <sys/ioctl.h>
14 #include <sys/types.h>
17 #include <linux/bpf.h>
19 #include <bpf/libbpf.h>
21 #include "cgroup_helpers.h"
22 #include "bpf_rlimit.h"
/* Evaluate @condition; on failure print "FAIL" plus the caller-supplied
 * format/args, on success print "PASS".  Evaluates to the normalized
 * (0/1) condition so callers can write: if (CHECK(...)) goto err;
 * Uses GNU statement-expression and named-variadic extensions, matching
 * the other BPF selftests.
 */
#define CHECK(condition, tag, format...) ({		\
	int __ret = !!(condition);			\
	if (__ret) {					\
		printf("%s:FAIL:%s ", __func__, tag);	\
		printf(format);				\
	} else {					\
		printf("%s:PASS:%s\n", __func__, tag);	\
	}						\
	__ret;						\
})
/* Look up map @name inside the loaded @obj and return its fd.
 * @test is the caller's name, used only to label the failure message.
 * Returns a valid map fd on success, -1 (with a FAIL line printed) if
 * the object contains no map with that name.
 */
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		/* without this check a missing map would hand NULL to
		 * bpf_map__fd()
		 */
		printf("%s:FAIL:map '%s' not found\n", test, name);
		return -1;
	}
	return bpf_map__fd(map);
}
46 #define TEST_CGROUP "/test-bpf-get-cgroup-id/"
48 int main(int argc, char **argv)
50 const char *probe_name = "syscalls/sys_enter_nanosleep";
51 const char *file = "get_cgroup_id_kern.o";
52 int err, bytes, efd, prog_fd, pmu_fd;
53 int cgroup_fd, cgidmap_fd, pidmap_fd;
54 struct perf_event_attr attr = {};
55 struct bpf_object *obj;
56 __u64 kcgid = 0, ucgid;
61 cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
62 if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
65 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
66 if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
67 goto cleanup_cgroup_env;
69 cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
70 if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
74 pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
75 if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
80 bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
82 snprintf(buf, sizeof(buf),
83 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
84 efd = open(buf, O_RDONLY, 0);
85 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
87 bytes = read(efd, buf, sizeof(buf));
89 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
90 "bytes %d errno %d\n", bytes, errno))
93 attr.config = strtol(buf, NULL, 0);
94 attr.type = PERF_TYPE_TRACEPOINT;
95 attr.sample_type = PERF_SAMPLE_RAW;
96 attr.sample_period = 1;
97 attr.wakeup_events = 1;
99 /* attach to this pid so the all bpf invocations will be in the
100 * cgroup associated with this pid.
102 pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
103 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
107 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
108 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
112 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
113 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
117 /* trigger some syscalls */
120 err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
121 if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
124 ucgid = get_cgroup_id(TEST_CGROUP);
125 if (CHECK(kcgid != ucgid, "compare_cgroup_id",
126 "kern cgid %llx user cgid %llx", kcgid, ucgid))
130 printf("%s:PASS\n", argv[0]);
135 bpf_object__close(obj);
137 cleanup_cgroup_environment();