1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2020 Facebook */
6 #include <linux/netfilter.h>
7 #include <linux/netfilter_arp.h>
8 #include <linux/perf_event.h>
14 #include <bpf/hashmap.h>
16 #include "json_writer.h"
18 #include "xlated_dumper.h"
/* Buffer length for the "<cache>-<op>-<result>" string built by
 * perf_config_hw_cache_str() for PERF_TYPE_HW_CACHE events.
 */
#define PERF_HW_CACHE_LEN 128

/* link id -> pinned bpffs path(s); built and torn down in do_show(). */
static struct hashmap *link_table;
/* Shared kallsyms dump state for the kprobe_multi dumpers; loaded
 * lazily via kernel_syms_load() and freed with kernel_syms_destroy()
 * at the end of do_show().
 */
static struct dump_data dd;
/* perf_event_attr.type -> printable name; callers fall back to the
 * numeric type for entries libbpf/this table does not cover.
 */
static const char *perf_type_name[PERF_TYPE_MAX] = {
	[PERF_TYPE_HARDWARE]			= "hardware",
	[PERF_TYPE_SOFTWARE]			= "software",
	[PERF_TYPE_TRACEPOINT]			= "tracepoint",
	[PERF_TYPE_HW_CACHE]			= "hw-cache",
	[PERF_TYPE_RAW]				= "raw",
	[PERF_TYPE_BREAKPOINT]			= "breakpoint",
/* PERF_TYPE_HARDWARE config value -> event name (perf naming). */
const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= "cpu-cycles",
	[PERF_COUNT_HW_INSTRUCTIONS]		= "instructions",
	[PERF_COUNT_HW_CACHE_REFERENCES]	= "cache-references",
	[PERF_COUNT_HW_CACHE_MISSES]		= "cache-misses",
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= "branch-instructions",
	[PERF_COUNT_HW_BRANCH_MISSES]		= "branch-misses",
	[PERF_COUNT_HW_BUS_CYCLES]		= "bus-cycles",
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= "stalled-cycles-frontend",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= "stalled-cycles-backend",
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= "ref-cycles",
/* PERF_TYPE_SOFTWARE config value -> event name (perf naming). */
const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK]		= "cpu-clock",
	[PERF_COUNT_SW_TASK_CLOCK]		= "task-clock",
	[PERF_COUNT_SW_PAGE_FAULTS]		= "page-faults",
	[PERF_COUNT_SW_CONTEXT_SWITCHES]	= "context-switches",
	[PERF_COUNT_SW_CPU_MIGRATIONS]		= "cpu-migrations",
	[PERF_COUNT_SW_PAGE_FAULTS_MIN]		= "minor-faults",
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ]		= "major-faults",
	[PERF_COUNT_SW_ALIGNMENT_FAULTS]	= "alignment-faults",
	[PERF_COUNT_SW_EMULATION_FAULTS]	= "emulation-faults",
	[PERF_COUNT_SW_DUMMY]			= "dummy",
	[PERF_COUNT_SW_BPF_OUTPUT]		= "bpf-output",
	[PERF_COUNT_SW_CGROUP_SWITCHES]		= "cgroup-switches",
/* hw-cache event: which cache is measured (config & 0xff) -> name;
 * see perf_config_hw_cache_str() for the field decoding.
 */
const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
	[PERF_COUNT_HW_CACHE_L1D]		= "L1-dcache",
	[PERF_COUNT_HW_CACHE_L1I]		= "L1-icache",
	[PERF_COUNT_HW_CACHE_LL]		= "LLC",
	[PERF_COUNT_HW_CACHE_DTLB]		= "dTLB",
	[PERF_COUNT_HW_CACHE_ITLB]		= "iTLB",
	[PERF_COUNT_HW_CACHE_BPU]		= "branch",
	[PERF_COUNT_HW_CACHE_NODE]		= "node",
/* hw-cache event: operation ((config >> 8) & 0xff) -> name. */
const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
	[PERF_COUNT_HW_CACHE_OP_READ]		= "load",
	[PERF_COUNT_HW_CACHE_OP_WRITE]		= "store",
	[PERF_COUNT_HW_CACHE_OP_PREFETCH]	= "prefetch",
/* hw-cache event: access result (config >> 16) -> name. */
const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[PERF_COUNT_HW_CACHE_RESULT_ACCESS]	= "refs",
	[PERF_COUNT_HW_CACHE_RESULT_MISS]	= "misses",
/* Look up (id) in one of the name tables above; yields NULL when the
 * id is out of range or the slot is empty, so callers can fall back
 * to printing the raw number.  Statement-expression macro so it works
 * with arrays of differing lengths.
 */
#define perf_event_name(array, id) ({			\
	const char *event_str = NULL;			\
							\
	if ((id) < ARRAY_SIZE(array))			\
		event_str = array[id];			\
/* Parse a link specifier ("id ID" or "pinned PATH") from argv and
 * return an open fd for the link (negative on error).  Consumes the
 * parsed tokens from *argc/*argv.
 */
static int link_parse_fd(int *argc, char ***argv)
	if (is_prefix(**argv, "id")) {
		/* numeric link id -> fd via BPF_LINK_GET_FD_BY_ID */
		id = strtoul(**argv, &endptr, 0);
			p_err("can't parse %s as ID", **argv);
		fd = bpf_link_get_fd_by_id(id);
			p_err("failed to get link with ID %d: %s", id, strerror(errno));
	} else if (is_prefix(**argv, "pinned")) {
		/* bpffs path -> fd, type-checked as a link object */
		return open_obj_pinned_any(path, BPF_OBJ_LINK);

	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
128 show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
130 const char *link_type_str;
132 jsonw_uint_field(wtr, "id", info->id);
133 link_type_str = libbpf_bpf_link_type_str(info->type);
135 jsonw_string_field(wtr, "type", link_type_str);
137 jsonw_uint_field(wtr, "type", info->type);
139 jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
142 static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
144 const char *attach_type_str;
146 attach_type_str = libbpf_bpf_attach_type_str(attach_type);
148 jsonw_string_field(wtr, "attach_type", attach_type_str);
150 jsonw_uint_field(wtr, "attach_type", attach_type);
153 static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
155 char devname[IF_NAMESIZE] = "(unknown)";
158 if_indextoname(ifindex, devname);
160 snprintf(devname, sizeof(devname), "(detached)");
161 jsonw_string_field(wtr, "devname", devname);
162 jsonw_uint_field(wtr, "ifindex", ifindex);
/* True when the iterator walks a map ("bpf_map_elem" or
 * "bpf_sk_storage_map"), i.e. a map_id field is meaningful.
 */
static bool is_iter_map_target(const char *target_name)
{
	return !strcmp(target_name, "bpf_map_elem") ||
	       !strcmp(target_name, "bpf_sk_storage_map");
}
/* True when the iterator walks the cgroup hierarchy. */
static bool is_iter_cgroup_target(const char *target_name)
{
	return !strcmp(target_name, "cgroup");
}
/* Map a bpf_cgroup_iter_order value to its printable name for the
 * cgroup-iterator dumpers.
 */
static const char *cgroup_order_string(__u32 order)
	case BPF_CGROUP_ITER_ORDER_UNSPEC:
		return "order_unspec";
	case BPF_CGROUP_ITER_SELF_ONLY:
	case BPF_CGROUP_ITER_DESCENDANTS_PRE:
		return "descendants_pre";
	case BPF_CGROUP_ITER_DESCENDANTS_POST:
		return "descendants_post";
	case BPF_CGROUP_ITER_ANCESTORS_UP:
		return "ancestors_up";
	default:	/* won't happen */
/* True when the iterator walks tasks ("task", "task_file" or
 * "task_vma"), i.e. pid/tid fields are meaningful.
 */
static bool is_iter_task_target(const char *target_name)
{
	static const char * const task_targets[] = {
		"task", "task_file", "task_vma",
	};
	size_t i;

	for (i = 0; i < sizeof(task_targets) / sizeof(task_targets[0]); i++) {
		if (!strcmp(target_name, task_targets[i]))
			return true;
	}
	return false;
}
/* Emit iter-link specifics: target name plus the per-target extras
 * (map id, task tid/pid, cgroup id and traversal order).
 */
static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
	const char *target_name = u64_to_ptr(info->iter.target_name);

	jsonw_string_field(wtr, "target_name", target_name);

	if (is_iter_map_target(target_name))
		jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		/* tid takes precedence; both zero means "all tasks" */
		if (info->iter.task.tid)
			jsonw_uint_field(wtr, "tid", info->iter.task.tid);
		else if (info->iter.task.pid)
			jsonw_uint_field(wtr, "pid", info->iter.task.pid);

	if (is_iter_cgroup_target(target_name)) {
		jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
		jsonw_string_field(wtr, "order",
				   cgroup_order_string(info->iter.cgroup.order));
223 void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
225 jsonw_uint_field(json_wtr, "pf",
227 jsonw_uint_field(json_wtr, "hook",
228 info->netfilter.hooknum);
229 jsonw_int_field(json_wtr, "prio",
230 info->netfilter.priority);
231 jsonw_uint_field(json_wtr, "flags",
232 info->netfilter.flags);
/* Fill *info for the program with id @prog_id by opening a temporary
 * fd to it; prints an error on failure.
 */
static int get_prog_info(int prog_id, struct bpf_prog_info *info)
	__u32 len = sizeof(*info);

	prog_fd = bpf_prog_get_fd_by_id(prog_id);

	memset(info, 0, sizeof(*info));
	err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
		p_err("can't get prog info: %s", strerror(errno));
257 static int cmp_addr_cookie(const void *A, const void *B)
259 const struct addr_cookie *a = A, *b = B;
261 if (a->addr == b->addr)
263 return a->addr < b->addr ? -1 : 1;
/* Pair each kprobe_multi address with its cookie and sort by address
 * so the result can be walked in lockstep with sorted kallsyms.
 * Returns a heap array the caller must free, or NULL on OOM.
 */
static struct addr_cookie *
get_addr_cookie_array(__u64 *addrs, __u64 *cookies, __u32 count)
	struct addr_cookie *data;

	data = calloc(count, sizeof(data[0]));
		p_err("mem alloc failed");
	for (i = 0; i < count; i++) {
		data[i].addr = addrs[i];
		data[i].cookie = cookies[i];
	qsort(data, count, sizeof(data[0]), cmp_addr_cookie);
/* Emit kprobe.multi link details: retprobe flag, counts, and for each
 * attached address the resolved function name/module via kallsyms.
 */
static void
show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
	struct addr_cookie *data;

	jsonw_bool_field(json_wtr, "retprobe",
			 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
	jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
	jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);
	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
				     u64_to_ptr(info->kprobe_multi.cookies),
				     info->kprobe_multi.count);

	/* Load it once for all. */
	kernel_syms_load(&dd);
	/* walk sorted kallsyms in lockstep with the sorted data[] array */
	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != data[j].addr)
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
		jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
		/* Print null if it is vmlinux */
		if (dd.sym_mapping[i].module[0] == '\0') {
			jsonw_name(json_wtr, "module");
			jsonw_null(json_wtr);
			jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
		jsonw_uint_field(json_wtr, "cookie", data[j].cookie);
		jsonw_end_object(json_wtr);
		/* NOTE(review): after j reaches count, the loop condition may
		 * still read data[j].addr one past the end — verify the bound
		 * check ordering against upstream.
		 */
		if (j++ == info->kprobe_multi.count)
	jsonw_end_array(json_wtr);
/* Reinterpret a kernel-returned u64 address as a pointer to a __u64
 * array (uprobe_multi offsets/ref_ctr_offsets/cookies come back this
 * way from bpf_link_get_info_by_fd()).
 */
static __u64 *u64_to_arr(__u64 val)
	return (__u64 *) u64_to_ptr(val);
/* Emit uprobe.multi link details: retprobe flag, binary path, pid and
 * the per-probe offset / ref_ctr_offset / cookie tuples.
 */
static void
show_uprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
	jsonw_bool_field(json_wtr, "retprobe",
			 info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN);
	jsonw_string_field(json_wtr, "path", (char *) u64_to_ptr(info->uprobe_multi.path));
	jsonw_uint_field(json_wtr, "func_cnt", info->uprobe_multi.count);
	jsonw_int_field(json_wtr, "pid", (int) info->uprobe_multi.pid);
	jsonw_name(json_wtr, "funcs");
	jsonw_start_array(json_wtr);

	for (i = 0; i < info->uprobe_multi.count; i++) {
		jsonw_start_object(json_wtr);
		jsonw_uint_field(json_wtr, "offset",
				 u64_to_arr(info->uprobe_multi.offsets)[i]);
		jsonw_uint_field(json_wtr, "ref_ctr_offset",
				 u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i]);
		jsonw_uint_field(json_wtr, "cookie",
				 u64_to_arr(info->uprobe_multi.cookies)[i]);
		jsonw_end_object(json_wtr);
	jsonw_end_array(json_wtr);
/* Emit perf-event-backed kprobe/kretprobe link fields. */
static void
show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
	jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
	jsonw_string_field(wtr, "func",
			   u64_to_ptr(info->perf_event.kprobe.func_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
	jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
	jsonw_uint_field(wtr, "cookie", info->perf_event.kprobe.cookie);
/* Emit perf-event-backed uprobe/uretprobe link fields. */
static void
show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
	jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
	jsonw_string_field(wtr, "file",
			   u64_to_ptr(info->perf_event.uprobe.file_name));
	jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
	jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie);
/* Emit perf-event-backed tracepoint link fields. */
static void
show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
	jsonw_string_field(wtr, "tracepoint",
			   u64_to_ptr(info->perf_event.tracepoint.tp_name));
	jsonw_uint_field(wtr, "cookie", info->perf_event.tracepoint.cookie);
/* Build the "<cache>-<op>-<result>" string for a PERF_TYPE_HW_CACHE
 * config.  Each component falls back to its raw numeric value when no
 * name is known.  Returns a heap string the caller must free (the
 * event dumpers free it), or NULL on allocation failure.
 */
static char *perf_config_hw_cache_str(__u64 config)
	const char *hw_cache, *result, *op;
	char *str = malloc(PERF_HW_CACHE_LEN);

		p_err("mem alloc failed");

	/* low byte: which cache */
	hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
		snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
		snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff);

	/* second byte: operation */
	op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld-", (config >> 8) & 0xff);

	/* remaining bits: access result */
	result = perf_event_name(evsel__hw_cache_result, config >> 16);
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
		snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
			 "%lld", config >> 16);
/* Translate (type, config) into a printable event name.  May return
 * NULL (caller then prints the raw config).  For PERF_TYPE_HW_CACHE
 * the string is heap-allocated by perf_config_hw_cache_str() and must
 * be freed by the caller; other types return static table entries.
 */
static const char *perf_config_str(__u32 type, __u64 config)
	const char *perf_config;

	case PERF_TYPE_HARDWARE:
		perf_config = perf_event_name(event_symbols_hw, config);
	case PERF_TYPE_SOFTWARE:
		perf_config = perf_event_name(event_symbols_sw, config);
	case PERF_TYPE_HW_CACHE:
		perf_config = perf_config_hw_cache_str(config);
/* Emit a generic perf event link (type/config/cookie), preferring
 * symbolic names and freeing the hw-cache name string afterwards.
 */
static void
show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	perf_type = perf_event_name(perf_type_name, type);
		jsonw_string_field(wtr, "event_type", perf_type);
		jsonw_uint_field(wtr, "event_type", type);

	perf_config = perf_config_str(type, config);
		jsonw_string_field(wtr, "event_config", perf_config);
		jsonw_uint_field(wtr, "event_config", config);

	jsonw_uint_field(wtr, "cookie", info->perf_event.event.cookie);

	/* hw-cache names are heap-allocated by perf_config_str() */
	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
/* Print one link as a JSON object: common header, then per-link-type
 * details, then pinned paths and pid references.
 */
static int show_link_close_json(int fd, struct bpf_link_info *info)
	struct bpf_prog_info prog_info;
	const char *prog_type_str;

	jsonw_start_object(json_wtr);

	show_link_header_json(info, json_wtr);

	/* per-type details */
	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		jsonw_string_field(json_wtr, "tp_name",
				   u64_to_ptr(info->raw_tracepoint.tp_name));
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
			jsonw_string_field(json_wtr, "prog_type", prog_type_str);
			jsonw_uint_field(json_wtr, "prog_type", prog_info.type);

		show_link_attach_type_json(info->tracing.attach_type,
		jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
		jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
	case BPF_LINK_TYPE_CGROUP:
		jsonw_lluint_field(json_wtr, "cgroup_id",
				   info->cgroup.cgroup_id);
		show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
	case BPF_LINK_TYPE_ITER:
		show_iter_json(info, json_wtr);
	case BPF_LINK_TYPE_NETNS:
		jsonw_uint_field(json_wtr, "netns_ino",
				 info->netns.netns_ino);
		show_link_attach_type_json(info->netns.attach_type, json_wtr);
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_json(info, json_wtr);
	case BPF_LINK_TYPE_TCX:
		show_link_ifindex_json(info->tcx.ifindex, json_wtr);
		show_link_attach_type_json(info->tcx.attach_type, json_wtr);
	case BPF_LINK_TYPE_NETKIT:
		show_link_ifindex_json(info->netkit.ifindex, json_wtr);
		show_link_attach_type_json(info->netkit.attach_type, json_wtr);
	case BPF_LINK_TYPE_XDP:
		show_link_ifindex_json(info->xdp.ifindex, json_wtr);
	case BPF_LINK_TYPE_STRUCT_OPS:
		jsonw_uint_field(json_wtr, "map_id",
				 info->struct_ops.map_id);
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_json(info, json_wtr);
	case BPF_LINK_TYPE_UPROBE_MULTI:
		show_uprobe_multi_json(info, json_wtr);
	case BPF_LINK_TYPE_PERF_EVENT:
		/* perf-event links carry a sub-type */
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_json(info, json_wtr);
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_json(info, json_wtr);
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_json(info, json_wtr);
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_json(info, json_wtr);

	/* bpffs paths this link id is pinned at */
	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hashmap__for_each_key_entry(link_table, entry, info->id)
			jsonw_string(json_wtr, entry->pvalue);
		jsonw_end_array(json_wtr);

	emit_obj_refs_json(refs_table, info->id, json_wtr);

	jsonw_end_object(json_wtr);
/* Print the "ID: type ... prog N" prefix common to all links in
 * plain mode.
 */
static void show_link_header_plain(struct bpf_link_info *info)
	const char *link_type_str;

	printf("%u: ", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
		printf("%s ", link_type_str);
		printf("type %u ", info->type);

	/* struct_ops links reference a map rather than a program */
	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
		printf("map %u ", info->struct_ops.map_id);
		printf("prog %u ", info->prog_id);
600 static void show_link_attach_type_plain(__u32 attach_type)
602 const char *attach_type_str;
604 attach_type_str = libbpf_bpf_attach_type_str(attach_type);
606 printf("attach_type %s ", attach_type_str);
608 printf("attach_type %u ", attach_type);
/* Print "ifindex <name>(<idx>) " for device-bound links; a failed
 * name lookup prints "(detached)".
 */
static void show_link_ifindex_plain(__u32 ifindex)
	char devname[IF_NAMESIZE * 2] = "(unknown)";
	char tmpname[IF_NAMESIZE];

	ret = if_indextoname(ifindex, tmpname);
		snprintf(devname, sizeof(devname), "(detached)");
		snprintf(devname, sizeof(devname), "%s(%d)",
	printf("ifindex %s ", devname);
/* Plain-mode counterpart of show_iter_json(): target name plus the
 * per-target extras (map id, task tid/pid, cgroup id/order).
 */
static void show_iter_plain(struct bpf_link_info *info)
	const char *target_name = u64_to_ptr(info->iter.target_name);

	printf("target_name %s ", target_name);

	if (is_iter_map_target(target_name))
		printf("map_id %u ", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		/* tid takes precedence; both zero means "all tasks" */
		if (info->iter.task.tid)
			printf("tid %u ", info->iter.task.tid);
		else if (info->iter.task.pid)
			printf("pid %u ", info->iter.task.pid);

	if (is_iter_cgroup_target(target_name)) {
		printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
			cgroup_order_string(info->iter.cgroup.order));
/* netfilter protocol family -> short name (iptables-style naming). */
static const char * const pf2name[] = {
	[NFPROTO_INET] = "inet",
	[NFPROTO_IPV4] = "ip",
	[NFPROTO_ARP] = "arp",
	[NFPROTO_NETDEV] = "netdev",
	[NFPROTO_BRIDGE] = "bridge",
	[NFPROTO_IPV6] = "ip6",
/* enum nf_inet_hooks -> chain name for inet/ip/ip6/bridge families. */
static const char * const inethook2name[] = {
	[NF_INET_PRE_ROUTING] = "prerouting",
	[NF_INET_LOCAL_IN] = "input",
	[NF_INET_FORWARD] = "forward",
	[NF_INET_LOCAL_OUT] = "output",
	[NF_INET_POST_ROUTING] = "postrouting",
/* ARP hook number -> chain name. */
static const char * const arphook2name[] = {
	[NF_ARP_IN] = "input",
	[NF_ARP_OUT] = "output",
/* Plain-mode netfilter link dump; prints symbolic pf/hook names where
 * known, numeric values otherwise.  Non-static — presumably reused by
 * another dumper; confirm callers before changing.
 */
void netfilter_dump_plain(const struct bpf_link_info *info)
	const char *hookname = NULL, *pfname = NULL;
	unsigned int hook = info->netfilter.hooknum;
	unsigned int pf = info->netfilter.pf;

	if (pf < ARRAY_SIZE(pf2name))
		pfname = pf2name[pf];

	/* pick the hook-name table matching the protocol family */
	case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
		if (hook < ARRAY_SIZE(inethook2name))
			hookname = inethook2name[hook];
		if (hook < ARRAY_SIZE(arphook2name))
			hookname = arphook2name[hook];

		printf("\n\t%s", pfname);
		printf("\n\tpf: %d", pf);

		printf(" %s", hookname);
		printf(", hook %u,", hook);

	printf(" prio %d", info->netfilter.priority);

	if (info->netfilter.flags)
		printf(" flags 0x%x", info->netfilter.flags);
/* Plain-mode kprobe.multi dump: flags, counts and a table of
 * addr/cookie/function resolved through kallsyms.
 */
static void show_kprobe_multi_plain(struct bpf_link_info *info)
	struct addr_cookie *data;

	if (!info->kprobe_multi.count)

	if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
		printf("\n\tkretprobe.multi ");
		printf("\n\tkprobe.multi ");
	printf("func_cnt %u ", info->kprobe_multi.count);
	if (info->kprobe_multi.missed)
		printf("missed %llu ", info->kprobe_multi.missed);
	data = get_addr_cookie_array(u64_to_ptr(info->kprobe_multi.addrs),
				     u64_to_ptr(info->kprobe_multi.cookies),
				     info->kprobe_multi.count);

	/* Load it once for all. */
	kernel_syms_load(&dd);
	printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
	/* walk sorted kallsyms in lockstep with the sorted data[] array */
	for (i = 0; i < dd.sym_count; i++) {
		if (dd.sym_mapping[i].address != data[j].addr)
		printf("\n\t%016lx %-16llx %s",
		       dd.sym_mapping[i].address, data[j].cookie, dd.sym_mapping[i].name);
		if (dd.sym_mapping[i].module[0] != '\0')
			printf(" [%s]  ", dd.sym_mapping[i].module);

		/* NOTE(review): as in the JSON variant, data[j] may be read
		 * one past the end once j reaches count — verify bound check.
		 */
		if (j++ == info->kprobe_multi.count)
/* Plain-mode uprobe.multi dump: path, pid and a table of per-probe
 * offset / ref_ctr_offset / cookie values.
 */
static void show_uprobe_multi_plain(struct bpf_link_info *info)
	if (!info->uprobe_multi.count)

	if (info->uprobe_multi.flags & BPF_F_UPROBE_MULTI_RETURN)
		printf("\n\turetprobe.multi ");
		printf("\n\tuprobe.multi ");

	printf("path %s ", (char *) u64_to_ptr(info->uprobe_multi.path));
	printf("func_cnt %u ", info->uprobe_multi.count);

	if (info->uprobe_multi.pid)
		printf("pid %d ", info->uprobe_multi.pid);

	printf("\n\t%-16s   %-16s   %-16s", "offset", "ref_ctr_offset", "cookies");
	for (i = 0; i < info->uprobe_multi.count; i++) {
		printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
		       u64_to_arr(info->uprobe_multi.offsets)[i],
		       u64_to_arr(info->uprobe_multi.ref_ctr_offsets)[i],
		       u64_to_arr(info->uprobe_multi.cookies)[i]);
/* Plain-mode perf-event kprobe/kretprobe dump; skips links with
 * neither a function name nor an address.
 */
static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
	buf = u64_to_ptr(info->perf_event.kprobe.func_name);
	if (buf[0] == '\0' && !info->perf_event.kprobe.addr)

	if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
		printf("\n\tkretprobe ");
		printf("\n\tkprobe ");
	if (info->perf_event.kprobe.addr)
		printf("%llx ", info->perf_event.kprobe.addr);

	if (info->perf_event.kprobe.offset)
		printf("+%#x", info->perf_event.kprobe.offset);
	if (info->perf_event.kprobe.missed)
		printf("  missed %llu", info->perf_event.kprobe.missed);
	if (info->perf_event.kprobe.cookie)
		printf("  cookie %llu", info->perf_event.kprobe.cookie);
/* Plain-mode perf-event uprobe/uretprobe dump. */
static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
	buf = u64_to_ptr(info->perf_event.uprobe.file_name);

	if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
		printf("\n\turetprobe ");
		printf("\n\tuprobe ");
	printf("%s+%#x ", buf, info->perf_event.uprobe.offset);
	if (info->perf_event.uprobe.cookie)
		printf("cookie %llu ", info->perf_event.uprobe.cookie);
/* Plain-mode perf-event tracepoint dump. */
static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
	buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);

	printf("\n\ttracepoint %s ", buf);
	if (info->perf_event.tracepoint.cookie)
		printf("cookie %llu ", info->perf_event.tracepoint.cookie);
/* Plain-mode generic perf event dump; frees the hw-cache name string
 * allocated by perf_config_str().
 */
static void show_perf_event_event_plain(struct bpf_link_info *info)
	__u64 config = info->perf_event.event.config;
	__u32 type = info->perf_event.event.type;
	const char *perf_type, *perf_config;

	printf("\n\tevent ");
	perf_type = perf_event_name(perf_type_name, type);
		printf("%s:", perf_type);
		printf("%u :", type);

	perf_config = perf_config_str(type, config);
		printf("%s ", perf_config);
		printf("%llu ", config);

	if (info->perf_event.event.cookie)
		printf("cookie %llu ", info->perf_event.event.cookie);

	/* hw-cache names are heap-allocated by perf_config_str() */
	if (type == PERF_TYPE_HW_CACHE && perf_config)
		free((void *)perf_config);
/* Print one link in plain mode: common header, per-link-type details,
 * then pinned paths and pid references.
 */
static int show_link_close_plain(int fd, struct bpf_link_info *info)
	struct bpf_prog_info prog_info;
	const char *prog_type_str;

	show_link_header_plain(info);

	/* per-type details */
	switch (info->type) {
	case BPF_LINK_TYPE_RAW_TRACEPOINT:
		printf("\n\ttp '%s' ",
		       (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
	case BPF_LINK_TYPE_TRACING:
		err = get_prog_info(info->prog_id, &prog_info);

		prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
		/* libbpf will return NULL for variants unknown to it. */
			printf("\n\tprog_type %s ", prog_type_str);
			printf("\n\tprog_type %u ", prog_info.type);

		show_link_attach_type_plain(info->tracing.attach_type);
		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
			printf("\n\ttarget_obj_id %u  target_btf_id %u ",
			       info->tracing.target_obj_id,
			       info->tracing.target_btf_id);
	case BPF_LINK_TYPE_CGROUP:
		printf("\n\tcgroup_id %zu  ", (size_t)info->cgroup.cgroup_id);
		show_link_attach_type_plain(info->cgroup.attach_type);
	case BPF_LINK_TYPE_ITER:
		show_iter_plain(info);
	case BPF_LINK_TYPE_NETNS:
		printf("\n\tnetns_ino %u  ", info->netns.netns_ino);
		show_link_attach_type_plain(info->netns.attach_type);
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_plain(info);
	case BPF_LINK_TYPE_TCX:
		show_link_ifindex_plain(info->tcx.ifindex);
		show_link_attach_type_plain(info->tcx.attach_type);
	case BPF_LINK_TYPE_NETKIT:
		show_link_ifindex_plain(info->netkit.ifindex);
		show_link_attach_type_plain(info->netkit.attach_type);
	case BPF_LINK_TYPE_XDP:
		show_link_ifindex_plain(info->xdp.ifindex);
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_plain(info);
	case BPF_LINK_TYPE_UPROBE_MULTI:
		show_uprobe_multi_plain(info);
	case BPF_LINK_TYPE_PERF_EVENT:
		/* perf-event links carry a sub-type */
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_plain(info);
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_plain(info);
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_plain(info);
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_plain(info);

	/* bpffs paths this link id is pinned at */
	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		hashmap__for_each_key_entry(link_table, entry, info->id)
			printf("\n\tpinned %s", (char *)entry->pvalue);
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
/* Fetch bpf_link_info for @fd and print it.  String/array fields are
 * only filled by the kernel when user space supplies buffers, so after
 * a first query this sets up the needed buffers per link type and
 * queries again before dispatching to the JSON/plain printers.
 */
static int do_show_link(int fd)
	__u64 *ref_ctr_offsets = NULL, *offsets = NULL, *cookies = NULL;
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	char path_buf[PATH_MAX];

	memset(&info, 0, sizeof(info));
	err = bpf_link_get_info_by_fd(fd, &info, &len);
		p_err("can't get link info: %s",

	/* raw tracepoint: buffer for the tp name */
	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
	    !info.raw_tracepoint.tp_name) {
		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
		info.raw_tracepoint.tp_name_len = sizeof(buf);

	/* iter: buffer for the target name */
	if (info.type == BPF_LINK_TYPE_ITER &&
	    !info.iter.target_name) {
		info.iter.target_name = ptr_to_u64(&buf);
		info.iter.target_name_len = sizeof(buf);

	/* kprobe.multi: arrays for addrs and cookies */
	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
	    !info.kprobe_multi.addrs) {
		count = info.kprobe_multi.count;
			addrs = calloc(count, sizeof(__u64));
				p_err("mem alloc failed");
			info.kprobe_multi.addrs = ptr_to_u64(addrs);
			cookies = calloc(count, sizeof(__u64));
				p_err("mem alloc failed");
			info.kprobe_multi.cookies = ptr_to_u64(cookies);

	/* uprobe.multi: arrays for offsets/ref_ctr_offsets/cookies + path */
	if (info.type == BPF_LINK_TYPE_UPROBE_MULTI &&
	    !info.uprobe_multi.offsets) {
		count = info.uprobe_multi.count;
			offsets = calloc(count, sizeof(__u64));
				p_err("mem alloc failed");
			info.uprobe_multi.offsets = ptr_to_u64(offsets);
			ref_ctr_offsets = calloc(count, sizeof(__u64));
			if (!ref_ctr_offsets) {
				p_err("mem alloc failed");
			info.uprobe_multi.ref_ctr_offsets = ptr_to_u64(ref_ctr_offsets);
			cookies = calloc(count, sizeof(__u64));
				p_err("mem alloc failed");
				free(ref_ctr_offsets);
			info.uprobe_multi.cookies = ptr_to_u64(cookies);
			info.uprobe_multi.path = ptr_to_u64(path_buf);
			info.uprobe_multi.path_size = sizeof(path_buf);

	/* perf-event links: buffer for the name of the probed object */
	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
		switch (info.perf_event.type) {
		case BPF_PERF_EVENT_TRACEPOINT:
			if (!info.perf_event.tracepoint.tp_name) {
				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
				info.perf_event.tracepoint.name_len = sizeof(buf);
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			if (!info.perf_event.kprobe.func_name) {
				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
				info.perf_event.kprobe.name_len = sizeof(buf);
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			if (!info.perf_event.uprobe.file_name) {
				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
				info.perf_event.uprobe.name_len = sizeof(buf);

		show_link_close_json(fd, &info);
		show_link_close_plain(fd, &info);

	free(ref_ctr_offsets);
/* "bpftool link show [LINK]": dump one link when a specifier is given,
 * otherwise iterate all link ids via bpf_link_get_next_id().
 */
static int do_show(int argc, char **argv)
	/* collect pinned paths and pid references for annotation */
	link_table = hashmap__new(hash_fn_for_key_as_id,
				  equal_fn_for_key_as_id, NULL);
	if (IS_ERR(link_table)) {
		p_err("failed to create hashmap for pinned paths");
	build_pinned_obj_table(link_table, BPF_OBJ_LINK);
	build_obj_refs_table(&refs_table, BPF_OBJ_LINK);

	fd = link_parse_fd(&argc, &argv);

	jsonw_start_array(json_wtr);
		err = bpf_link_get_next_id(id, &id);
			/* ENOENT: no more links -> normal termination */
			if (errno == ENOENT)
			p_err("can't get next link: %s%s", strerror(errno),
			      errno == EINVAL ? " -- kernel too old?" : "");

		fd = bpf_link_get_fd_by_id(id);
			/* link may have disappeared between the two calls */
			if (errno == ENOENT)
			p_err("can't get link by id (%u): %s",
			      id, strerror(errno));

		err = do_show_link(fd);
	jsonw_end_array(json_wtr);

	delete_obj_refs_table(refs_table);

	delete_pinned_obj_table(link_table);

	/* kallsyms state possibly loaded by the kprobe_multi dumpers */
	kernel_syms_destroy(&dd);
	return errno == ENOENT ? 0 : -1;
/* "bpftool link pin LINK FILE": pin a link into bpffs. */
static int do_pin(int argc, char **argv)
	err = do_pin_any(argc, argv, link_parse_fd);
	/* JSON mode expects a value even on this side-effect-only command */
	if (!err && json_output)
		jsonw_null(json_wtr);
/* "bpftool link detach LINK": force-detach a link from its hook. */
static int do_detach(int argc, char **argv)
		p_err("link specifier is invalid or missing\n");

	fd = link_parse_fd(&argc, &argv);

	err = bpf_link_detach(fd);
		p_err("failed link detach: %s", strerror(-err));

		jsonw_null(json_wtr);
/* Print usage for "bpftool link" (null in JSON mode, per convention). */
static int do_help(int argc, char **argv)
		jsonw_null(json_wtr);

		"Usage: %1$s %2$s { show | list }   [LINK]\n"
		"       %1$s %2$s pin LINK FILE\n"
		"       %1$s %2$s detach LINK\n"
		"       " HELP_SPEC_LINK "\n"
		"       " HELP_SPEC_OPTIONS " |\n"
		"                    {-f|--bpffs} | {-n|--nomount} }\n"
		bin_name, argv[-2]);
/* Subcommand dispatch table for "bpftool link". */
static const struct cmd cmds[] = {
	{ "show",	do_show },
	{ "list",	do_show },
	{ "help",	do_help },
	{ "detach",	do_detach },
/* Entry point for "bpftool link ..."; dispatches via cmds[]. */
int do_link(int argc, char **argv)
	return cmd_select(cmds, argc, argv, do_help);