// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>
#include "mmap_unlock_work.h"
static const char * const iter_task_type_names[] = {
	"ALL",
	"TID",
	"PID",
};
struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
	enum bpf_iter_task_type type;
	u32 pid;
	u32 pid_visiting;
};
struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};
static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_common *common,
						   u32 *tid,
						   bool skip_if_dup_files)
{
	struct task_struct *task, *next_task;
	struct pid *pid;

	if (!*tid) {
		/* The first time, the iterator calls this function. */
		pid = find_pid_ns(common->pid, common->ns);
		task = get_pid_task(pid, PIDTYPE_TGID);
		if (!task)
			return NULL;

		*tid = common->pid;
		common->pid_visiting = common->pid;

		return task;
	}

	/* If control returns to user space and comes back to the
	 * kernel again, *tid and common->pid_visiting should be the
	 * same for task_seq_start() to pick up the correct task.
	 */
	if (*tid == common->pid_visiting) {
		pid = find_pid_ns(common->pid_visiting, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);

		return task;
	}

	task = find_task_by_pid_ns(common->pid_visiting, common->ns);
	if (!task)
		return NULL;

retry:
	next_task = next_thread(task);

	*tid = __task_pid_nr_ns(next_task, PIDTYPE_PID, common->ns);
	if (!*tid || *tid == common->pid) {
		/* Run out of tasks of a process.  The tasks of a
		 * thread_group are linked as a circular linked list.
		 */
		return NULL;
	}

	common->pid_visiting = *tid;

	if (skip_if_dup_files && task->files == task->group_leader->files) {
		task = next_task;
		goto retry;
	}

	get_task_struct(next_task);
	return next_task;
}
static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
					     u32 *tid,
					     bool skip_if_dup_files)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	if (common->type == BPF_TASK_ITER_TID) {
		if (*tid && *tid != common->pid)
			return NULL;
		rcu_read_lock();
		pid = find_pid_ns(common->pid, common->ns);
		if (pid) {
			task = get_pid_task(pid, PIDTYPE_TGID);
			*tid = common->pid;
		}
		rcu_read_unlock();

		return task;
	}

	if (common->type == BPF_TASK_ITER_TGID) {
		rcu_read_lock();
		task = task_group_seq_get_next(common, tid, skip_if_dup_files);
		rcu_read_unlock();

		return task;
	}

	rcu_read_lock();
retry:
	pid = find_ge_pid(*tid, common->ns);
	if (pid) {
		*tid = pid_nr_ns(pid, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);
		if (!task) {
			++*tid;
			goto retry;
		} else if (skip_if_dup_files && !thread_group_leader(task) &&
			   task->files == task->group_leader->files) {
			put_task_struct(task);
			task = NULL;
			++*tid;
			goto retry;
		}
	}
	rcu_read_unlock();

	return task;
}
static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	if (*pos == 0)
		++*pos;
	return task;
}
static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	return task;
}
struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};
DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)
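
/* A minimal BPF-side consumer of this iterator might look like the
 * sketch below (illustrative; the program name and output format are
 * not part of this file):
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *		struct task_struct *task = ctx->task;
 *
 *		if (!task)
 *			return 0;
 *		BPF_SEQ_PRINTF(seq, "%8d %s\n", task->pid, task->comm);
 *		return 0;
 *	}
 */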
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}
static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}
static void task_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}
static int bpf_iter_attach_task(struct bpf_prog *prog,
				union bpf_iter_link_info *linfo,
				struct bpf_iter_aux_info *aux)
{
	unsigned int flags;
	struct pid *pid;
	pid_t tgid;

	if ((!!linfo->task.tid + !!linfo->task.pid + !!linfo->task.pid_fd) > 1)
		return -EINVAL;

	aux->task.type = BPF_TASK_ITER_ALL;
	if (linfo->task.tid != 0) {
		aux->task.type = BPF_TASK_ITER_TID;
		aux->task.pid = linfo->task.tid;
	}
	if (linfo->task.pid != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;
		aux->task.pid = linfo->task.pid;
	}
	if (linfo->task.pid_fd != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;

		pid = pidfd_get_pid(linfo->task.pid_fd, &flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		tgid = pid_nr_ns(pid, task_active_pid_ns(current));
		aux->task.pid = tgid;
		put_pid(pid);
	}

	return 0;
}
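
/* User space selects one of the three mutually exclusive filters above
 * through bpf_iter_link_info at link-creation time.  A libbpf sketch
 * (illustrative; error handling omitted, "skel" is a placeholder):
 *
 *	union bpf_iter_link_info linfo;
 *	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
 *
 *	memset(&linfo, 0, sizeof(linfo));
 *	linfo.task.pid = getpid();	// or .tid or .pid_fd, but only one
 *	opts.link_info = &linfo;
 *	opts.link_info_len = sizeof(linfo);
 *	link = bpf_program__attach_iter(skel->progs.dump_task, &opts);
 */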
static const struct seq_operations task_seq_ops = {
	.start = task_seq_start,
	.next = task_seq_next,
	.stop = task_seq_stop,
	.show = task_seq_show,
};
struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	u32 tid;
	u32 fd;
};
static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
	u32 saved_tid = info->tid;
	struct task_struct *curr_task;
	unsigned int curr_fd = info->fd;

	/* If this function returns a non-NULL file object,
	 * it holds a reference to the task/file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (info->task) {
		curr_task = info->task;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->task = NULL;
			return NULL;
		}

		/* set info->task */
		info->task = curr_task;
		if (saved_tid == info->tid)
			curr_fd = info->fd;
		else
			curr_fd = 0;
	}

	rcu_read_lock();
	for (;; curr_fd++) {
		struct file *f;

		f = task_lookup_next_fd_rcu(curr_task, &curr_fd);
		if (!f)
			break;
		if (!get_file_rcu(f))
			continue;

		/* set info->fd */
		info->fd = curr_fd;
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_task_struct(curr_task);

	if (info->common.type == BPF_TASK_ITER_TID) {
		info->task = NULL;
		return NULL;
	}

	info->task = NULL;
	info->fd = 0;
	saved_tid = ++(info->tid);
	goto again;
}
static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct file *file;

	info->task = NULL;
	file = task_file_seq_get_next(info);
	if (file && *pos == 0)
		++*pos;

	return file;
}
static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	return task_file_seq_get_next(info);
}
struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};
DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)
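
/* BPF-side sketch for this iterator (illustrative):
 *
 *	SEC("iter/task_file")
 *	int dump_task_file(struct bpf_iter__task_file *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *		struct file *file = ctx->file;
 *
 *		if (!task || !file)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%8d %8d %lx\n",
 *			       task->tgid, ctx->fd, (long)file->f_op);
 *		return 0;
 *	}
 */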
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}
static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}
static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_task_struct(info->task);
		info->task = NULL;
	}
}
static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	common->type = aux->task.type;
	common->pid = aux->task.pid;

	return 0;
}
static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}
static const struct seq_operations task_file_seq_ops = {
	.start = task_file_seq_start,
	.next = task_file_seq_next,
	.stop = task_file_seq_stop,
	.show = task_file_seq_show,
};
struct bpf_iter_seq_task_vma_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u32 tid;
	unsigned long prev_vm_start;
	unsigned long prev_vm_end;
};
enum bpf_task_vma_iter_find_op {
	task_vma_iter_first_vma,   /* use find_vma() with addr 0 */
	task_vma_iter_next_vma,    /* use vma_next() with curr_vma */
	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
};
static struct vm_area_struct *
task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
{
	enum bpf_task_vma_iter_find_op op;
	struct vm_area_struct *curr_vma;
	struct task_struct *curr_task;
	struct mm_struct *curr_mm;
	u32 saved_tid = info->tid;

	/* If this function returns a non-NULL vma, it holds a reference to
	 * the task_struct, holds a refcount on mm->mm_users, and holds
	 * read lock on vma->mm->mmap_lock.
	 * If this function returns NULL, it does not hold any reference or
	 * lock.
	 */
	if (info->task) {
		curr_task = info->task;
		curr_vma = info->vma;
		curr_mm = info->mm;
		/* In case of lock contention, drop mmap_lock to unblock
		 * the writer.
		 *
		 * After relock, call find_vma(mm, prev_vm_end - 1) to find
		 * the new vma to process.
		 *
		 * +------+------+-----------+
		 * | VMA1 | VMA2 | VMA3      |
		 * +------+------+-----------+
		 * |      |      |           |
		 * 4k     8k     16k         400k
		 *
		 * For example, curr_vma == VMA2. Before unlock, we set
		 *
		 *    prev_vm_start = 8k
		 *    prev_vm_end   = 16k
		 *
		 * There are a few cases:
		 *
		 * 1) VMA2 is freed, but VMA3 exists.
		 *    find_vma() will return VMA3, just process VMA3.
		 * 2) VMA2 still exists.
		 *    find_vma() will return VMA2, process VMA2->next.
		 * 3) no more vma in this mm.
		 *    Process the next task.
		 * 4) find_vma() returns a different vma, VMA2'.
		 *    4.1) If VMA2 covers the same range as VMA2', skip VMA2',
		 *         because we already covered the range;
		 *    4.2) VMA2 and VMA2' cover different ranges, process VMA2'.
		 */
		if (mmap_lock_is_contended(curr_mm)) {
			info->prev_vm_start = curr_vma->vm_start;
			info->prev_vm_end = curr_vma->vm_end;
			op = task_vma_iter_find_vma;
			mmap_read_unlock(curr_mm);
			if (mmap_read_lock_killable(curr_mm)) {
				mmput(curr_mm);
				goto finish;
			}
		} else {
			op = task_vma_iter_next_vma;
		}
	} else {
again:
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->tid++;
			goto finish;
		}

		if (saved_tid != info->tid) {
			/* new task, process the first vma */
			op = task_vma_iter_first_vma;
		} else {
			/* Found the same tid, which means the user space
			 * finished data in previous buffer and read more.
			 * We dropped mmap_lock before returning to user
			 * space, so it is necessary to use find_vma() to
			 * find the next vma to process.
			 */
			op = task_vma_iter_find_vma;
		}

		curr_mm = get_task_mm(curr_task);
		if (!curr_mm)
			goto next_task;

		if (mmap_read_lock_killable(curr_mm)) {
			mmput(curr_mm);
			goto finish;
		}
	}

	switch (op) {
	case task_vma_iter_first_vma:
		curr_vma = find_vma(curr_mm, 0);
		break;
	case task_vma_iter_next_vma:
		curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	case task_vma_iter_find_vma:
		/* We dropped mmap_lock so it is necessary to use find_vma
		 * to find the next vma. This is similar to the mechanism
		 * in show_smaps_rollup().
		 */
		curr_vma = find_vma(curr_mm, info->prev_vm_end - 1);
		/* case 1) and 4.2) above just use curr_vma */

		/* check for case 2) or case 4.1) above */
		if (curr_vma &&
		    curr_vma->vm_start == info->prev_vm_start &&
		    curr_vma->vm_end == info->prev_vm_end)
			curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	}
	if (!curr_vma) {
		/* case 3) above, or case 2) 4.1) with vma->next == NULL */
		mmap_read_unlock(curr_mm);
		mmput(curr_mm);
		goto next_task;
	}
	info->task = curr_task;
	info->vma = curr_vma;
	info->mm = curr_mm;
	return curr_vma;

next_task:
	if (info->common.type == BPF_TASK_ITER_TID)
		goto finish;

	put_task_struct(curr_task);
	info->task = NULL;
	info->mm = NULL;
	info->tid++;
	goto again;

finish:
	if (curr_task)
		put_task_struct(curr_task);
	info->task = NULL;
	info->vma = NULL;
	info->mm = NULL;
	return NULL;
}
static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct vm_area_struct *vma;

	vma = task_vma_seq_get_next(info);
	if (vma && *pos == 0)
		++*pos;

	return vma;
}
static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	++*pos;
	return task_vma_seq_get_next(info);
}
struct bpf_iter__task_vma {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	__bpf_md_ptr(struct vm_area_struct *, vma);
};
DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
		     struct task_struct *task, struct vm_area_struct *vma)
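
/* BPF-side sketch for this iterator (illustrative), printing the range
 * of each mapping:
 *
 *	SEC("iter/task_vma")
 *	int dump_task_vma(struct bpf_iter__task_vma *ctx)
 *	{
 *		struct vm_area_struct *vma = ctx->vma;
 *		struct task_struct *task = ctx->task;
 *
 *		if (!vma || !task)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%8d %lx-%lx\n",
 *			       task->pid, vma->vm_start, vma->vm_end);
 *		return 0;
 *	}
 */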
static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct bpf_iter__task_vma ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.vma = info->vma;
	return bpf_iter_run_prog(prog, &ctx);
}
static int task_vma_seq_show(struct seq_file *seq, void *v)
{
	return __task_vma_seq_show(seq, false);
}
static void task_vma_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	if (!v) {
		(void)__task_vma_seq_show(seq, true);
	} else {
		/* info->vma has not been seen by the BPF program. If the
		 * user space reads more, task_vma_seq_get_next should
		 * return this vma again. Set prev_vm_start to ~0UL,
		 * so that we don't skip the vma returned by the next
		 * find_vma() (case task_vma_iter_find_vma in
		 * task_vma_seq_get_next()).
		 */
		info->prev_vm_start = ~0UL;
		info->prev_vm_end = info->vma->vm_end;
		mmap_read_unlock(info->mm);
		mmput(info->mm);
		info->mm = NULL;
		put_task_struct(info->task);
		info->task = NULL;
	}
}
static const struct seq_operations task_vma_seq_ops = {
	.start = task_vma_seq_start,
	.next = task_vma_seq_next,
	.stop = task_vma_seq_stop,
	.show = task_vma_seq_show,
};
static const struct bpf_iter_seq_info task_seq_info = {
	.seq_ops = &task_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_info),
};
static int bpf_iter_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info)
{
	switch (aux->task.type) {
	case BPF_TASK_ITER_TID:
		info->iter.task.tid = aux->task.pid;
		break;
	case BPF_TASK_ITER_TGID:
		info->iter.task.pid = aux->task.pid;
		break;
	default:
		break;
	}
	return 0;
}
static void bpf_iter_task_show_fdinfo(const struct bpf_iter_aux_info *aux, struct seq_file *seq)
{
	seq_printf(seq, "task_type:\t%s\n", iter_task_type_names[aux->task.type]);
	if (aux->task.type == BPF_TASK_ITER_TID)
		seq_printf(seq, "tid:\t%u\n", aux->task.pid);
	else if (aux->task.type == BPF_TASK_ITER_TGID)
		seq_printf(seq, "pid:\t%u\n", aux->task.pid);
}
static struct bpf_iter_reg task_reg_info = {
	.target = "task",
	.attach_target = bpf_iter_attach_task,
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task, task),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_seq_info,
	.fill_link_info = bpf_iter_fill_link_info,
	.show_fdinfo = bpf_iter_task_show_fdinfo,
};
static const struct bpf_iter_seq_info task_file_seq_info = {
	.seq_ops = &task_file_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info),
};
static struct bpf_iter_reg task_file_reg_info = {
	.target = "task_file",
	.attach_target = bpf_iter_attach_task,
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task_file, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_file, file),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_file_seq_info,
	.fill_link_info = bpf_iter_fill_link_info,
	.show_fdinfo = bpf_iter_task_show_fdinfo,
};
static const struct bpf_iter_seq_info task_vma_seq_info = {
	.seq_ops = &task_vma_seq_ops,
	.init_seq_private = init_seq_pidns,
	.fini_seq_private = fini_seq_pidns,
	.seq_priv_size = sizeof(struct bpf_iter_seq_task_vma_info),
};
static struct bpf_iter_reg task_vma_reg_info = {
	.target = "task_vma",
	.attach_target = bpf_iter_attach_task,
	.feature = BPF_ITER_RESCHED,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__task_vma, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_vma, vma),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &task_vma_seq_info,
	.fill_link_info = bpf_iter_fill_link_info,
	.show_fdinfo = bpf_iter_task_show_fdinfo,
};
BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
	   bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
{
	struct mmap_unlock_irq_work *work = NULL;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct mm_struct *mm;
	int ret = -ENOENT;

	if (flags)
		return -EINVAL;

	if (!task)
		return -ENOENT;

	mm = task->mm;
	if (!mm)
		return -ENOENT;

	irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);

	if (irq_work_busy || !mmap_read_trylock(mm))
		return -EBUSY;

	vma = find_vma(mm, start);

	if (vma && vma->vm_start <= start && vma->vm_end > start) {
		callback_fn((u64)(long)task, (u64)(long)vma,
			    (u64)(long)callback_ctx, 0, 0);
		ret = 0;
	}
	bpf_mmap_unlock_mm(work, mm);
	return ret;
}
const struct bpf_func_proto bpf_find_vma_proto = {
	.func		= bpf_find_vma,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_FUNC,
	.arg4_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg5_type	= ARG_ANYTHING,
};
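
/* BPF-side sketch of using this helper (illustrative; "check_vma" and
 * its body are made up for the example).  The callback is invoked once,
 * for the vma covering 'addr', if any:
 *
 *	static long check_vma(struct task_struct *task,
 *			      struct vm_area_struct *vma, void *data)
 *	{
 *		// inspect task/vma, stash results in *data
 *		return 0;
 *	}
 *
 *	// in a program holding a valid task pointer:
 *	long err = bpf_find_vma(task, addr, check_vma, &data, 0);
 */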
DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
static void do_mmap_read_unlock(struct irq_work *entry)
{
	struct mmap_unlock_irq_work *work;

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
	mmap_read_unlock_non_owner(work->mm);
}
static int __init task_iter_init(void)
{
	struct mmap_unlock_irq_work *work;
	int ret, cpu;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&mmap_unlock_work, cpu);
		init_irq_work(&work->irq_work, do_mmap_read_unlock);
	}

	task_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	task_file_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_file_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_FILE];
	ret = bpf_iter_reg_target(&task_file_reg_info);
	if (ret)
		return ret;

	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
	return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);