// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>
#include "mmap_unlock_work.h"

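/* BPF iterators for tasks: the "task", "task_file" and "task_vma"
 * targets wrap a seq_file interface around task iteration, invoking a
 * BPF program once per task, per (task, file) pair, or per (task, vma)
 * pair.  This file also implements the bpf_find_vma() helper.
 */
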
static const char * const iter_task_type_names[] = {
	"ALL",
	"TID",
	"PID",
};

struct bpf_iter_seq_task_common {
	struct pid_namespace *ns;
	enum bpf_iter_task_type type;
	u32 pid;
	u32 pid_visiting;
};

struct bpf_iter_seq_task_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	u32 tid;
};

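/* Walk the threads of the thread group identified by common->pid,
 * returning one referenced task per call.  common->pid_visiting is the
 * cursor: it records the tid handed out last, so that iteration resumes
 * at the right thread after the seq_file buffer is flushed to user
 * space.
 */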
static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_common *common,
						   u32 *tid,
						   bool skip_if_dup_files)
{
	struct task_struct *task, *next_task;
	struct pid *pid;
	u32 saved_tid;

	if (!*tid) {
		/* The first time, the iterator calls this function. */
		pid = find_pid_ns(common->pid, common->ns);
		if (!pid)
			return NULL;

		task = get_pid_task(pid, PIDTYPE_TGID);
		if (!task)
			return NULL;

		*tid = common->pid;
		common->pid_visiting = common->pid;

		return task;
	}

	/* If the control returns to user space and comes back to the
	 * kernel again, *tid and common->pid_visiting should be the
	 * same for task_seq_start() to pick up the correct task.
	 */
	if (*tid == common->pid_visiting) {
		pid = find_pid_ns(common->pid_visiting, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);

		return task;
	}

	pid = find_pid_ns(common->pid_visiting, common->ns);
	if (!pid)
		return NULL;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return NULL;

retry:
	if (!pid_alive(task)) {
		put_task_struct(task);
		return NULL;
	}

	next_task = next_thread(task);
	put_task_struct(task);
	if (!next_task)
		return NULL;

	saved_tid = *tid;
	*tid = __task_pid_nr_ns(next_task, PIDTYPE_PID, common->ns);
	if (!*tid || *tid == common->pid) {
		/* Run out of tasks of a process.  The tasks of a
		 * thread_group are linked as a circular list, so hitting
		 * the group leader's pid again means we are done.
		 */
		*tid = saved_tid;
		return NULL;
	}

	get_task_struct(next_task);
	common->pid_visiting = *tid;

	if (skip_if_dup_files && task->files == task->group_leader->files) {
		task = next_task;
		goto retry;
	}

	return next_task;
}

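/* Return the next task to visit, dispatching on the iterator type:
 * a single task (BPF_TASK_ITER_TID), the threads of one process
 * (BPF_TASK_ITER_TGID), or every task in the namespace, found with
 * find_ge_pid() starting from *tid (BPF_TASK_ITER_ALL).
 */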
static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
					     u32 *tid,
					     bool skip_if_dup_files)
{
	struct task_struct *task = NULL;
	struct pid *pid;

	if (common->type == BPF_TASK_ITER_TID) {
		if (*tid && *tid != common->pid)
			return NULL;
		rcu_read_lock();
		pid = find_pid_ns(common->pid, common->ns);
		if (pid) {
			task = get_pid_task(pid, PIDTYPE_TGID);
			*tid = common->pid;
		}
		rcu_read_unlock();

		return task;
	}

	if (common->type == BPF_TASK_ITER_TGID) {
		rcu_read_lock();
		task = task_group_seq_get_next(common, tid, skip_if_dup_files);
		rcu_read_unlock();

		return task;
	}

	rcu_read_lock();
retry:
	pid = find_ge_pid(*tid, common->ns);
	if (pid) {
		*tid = pid_nr_ns(pid, common->ns);
		task = get_pid_task(pid, PIDTYPE_PID);
		if (!task) {
			++*tid;
			goto retry;
		} else if (skip_if_dup_files && !thread_group_leader(task) &&
			   task->files == task->group_leader->files) {
			put_task_struct(task);
			task = NULL;
			++*tid;
			goto retry;
		}
	}
	rcu_read_unlock();

	return task;
}

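/* seq_file callbacks for the "task" target.  ->start and ->next return
 * a referenced task_struct; ->stop either drops that reference or, at
 * the end of the iteration, gives the BPF program one final call with
 * task == NULL.
 */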
static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	if (*pos == 0)
		++*pos;
	return task;
}

static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_info *info = seq->private;
	struct task_struct *task;

	++*pos;
	++info->tid;
	put_task_struct((struct task_struct *)v);
	task = task_seq_get_next(&info->common, &info->tid, false);
	if (!task)
		return NULL;

	return task;
}

struct bpf_iter__task {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

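/* A minimal consumer of this target is an "iter/task" BPF program; the
 * sketch below is illustrative and not part of this file.  task is NULL
 * on the final call that ends the iteration (see __task_seq_show()).
 *
 *	SEC("iter/task")
 *	int dump_task(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *
 *		if (task)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d\n", task->pid);
 *		return 0;
 *	}
 */
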
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__task ctx;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = task;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_seq_show(struct seq_file *seq, void *v)
{
	return __task_seq_show(seq, v, false);
}

static void task_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__task_seq_show(seq, v, true);
	else
		put_task_struct((struct task_struct *)v);
}

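/* Validate and record the attach parameters from the link info: at most
 * one of tid, pid and pid_fd may be set.  A pid_fd is resolved to a
 * tgid as seen in the current task's pid namespace.
 */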
static int bpf_iter_attach_task(struct bpf_prog *prog,
				union bpf_iter_link_info *linfo,
				struct bpf_iter_aux_info *aux)
{
	unsigned int flags;
	struct pid *pid;
	pid_t tgid;

	if ((!!linfo->task.tid + !!linfo->task.pid + !!linfo->task.pid_fd) > 1)
		return -EINVAL;

	aux->task.type = BPF_TASK_ITER_ALL;
	if (linfo->task.tid != 0) {
		aux->task.type = BPF_TASK_ITER_TID;
		aux->task.pid = linfo->task.tid;
	}
	if (linfo->task.pid != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;
		aux->task.pid = linfo->task.pid;
	}
	if (linfo->task.pid_fd != 0) {
		aux->task.type = BPF_TASK_ITER_TGID;

		pid = pidfd_get_pid(linfo->task.pid_fd, &flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		tgid = pid_nr_ns(pid, task_active_pid_ns(current));
		aux->task.pid = tgid;
		put_pid(pid);
	}

	return 0;
}

static const struct seq_operations task_seq_ops = {
	.start	= task_seq_start,
	.next	= task_seq_next,
	.stop	= task_seq_stop,
	.show	= task_seq_show,
};

struct bpf_iter_seq_task_file_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	u32 tid;
	u32 fd;
};

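/* Return the next file to show, together with an updated (tid, fd)
 * position.  Tasks that share their files_struct with their group
 * leader are skipped (skip_if_dup_files), so each fd table is visited
 * only once per process.
 */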
static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{
	u32 saved_tid = info->tid;
	struct task_struct *curr_task;
	unsigned int curr_fd = info->fd;
	struct file *f;

	/* If this function returns a non-NULL file object,
	 * it held a reference to the task/file.
	 * Otherwise, it does not hold any reference.
	 */
again:
	if (info->task) {
		curr_task = info->task;
		curr_fd = info->fd;
	} else {
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->task = NULL;
			return NULL;
		}

		/* set info->task */
		info->task = curr_task;
		if (saved_tid == info->tid)
			curr_fd = info->fd;
		else
			curr_fd = 0;
	}

	rcu_read_lock();
	f = task_lookup_next_fdget_rcu(curr_task, &curr_fd);
	if (f) {
		/* set info->fd */
		info->fd = curr_fd;
		rcu_read_unlock();
		return f;
	}

	/* the current task is done, go to the next task */
	rcu_read_unlock();
	put_task_struct(curr_task);

	if (info->common.type == BPF_TASK_ITER_TID) {
		info->task = NULL;
		return NULL;
	}

	info->task = NULL;
	info->fd = 0;
	saved_tid = ++(info->tid);
	goto again;
}

static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct file *file;

	info->task = NULL;
	file = task_file_seq_get_next(info);
	if (file && *pos == 0)
		++*pos;

	return file;
}

static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	++*pos;
	++info->fd;
	fput((struct file *)v);
	return task_file_seq_get_next(info);
}

struct bpf_iter__task_file {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	u32 fd __aligned(8);
	__bpf_md_ptr(struct file *, file);
};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)

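/* Illustrative sketch of an "iter/task_file" consumer (not part of this
 * file); ctx->file is NULL on the final call of a dump:
 *
 *	SEC("iter/task_file")
 *	int dump_task_file(struct bpf_iter__task_file *ctx)
 *	{
 *		if (ctx->file)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%d %u\n",
 *				       ctx->task->pid, ctx->fd);
 *		return 0;
 *	}
 */
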
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;
	struct bpf_iter__task_file ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.fd = info->fd;
	ctx.file = file;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_file_seq_show(struct seq_file *seq, void *v)
{
	return __task_file_seq_show(seq, v, false);
}

static void task_file_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_file_info *info = seq->private;

	if (!v) {
		(void)__task_file_seq_show(seq, v, true);
	} else {
		fput((struct file *)v);
		put_task_struct(info->task);
		info->task = NULL;
	}
}

static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	common->ns = get_pid_ns(task_active_pid_ns(current));
	common->type = aux->task.type;
	common->pid = aux->task.pid;

	return 0;
}

static void fini_seq_pidns(void *priv_data)
{
	struct bpf_iter_seq_task_common *common = priv_data;

	put_pid_ns(common->ns);
}

static const struct seq_operations task_file_seq_ops = {
	.start	= task_file_seq_start,
	.next	= task_file_seq_next,
	.stop	= task_file_seq_stop,
	.show	= task_file_seq_show,
};

struct bpf_iter_seq_task_vma_info {
	/* The first field must be struct bpf_iter_seq_task_common.
	 * This is assumed by the {init, fini}_seq_pidns() callback functions.
	 */
	struct bpf_iter_seq_task_common common;
	struct task_struct *task;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u32 tid;
	unsigned long prev_vm_start;
	unsigned long prev_vm_end;
};

enum bpf_task_vma_iter_find_op {
	task_vma_iter_first_vma,   /* use find_vma() with addr 0 */
	task_vma_iter_next_vma,    /* use find_vma() with curr_vma->vm_end */
	task_vma_iter_find_vma,    /* use find_vma() to find next vma */
};

static struct vm_area_struct *
task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
{
	enum bpf_task_vma_iter_find_op op;
	struct vm_area_struct *curr_vma;
	struct task_struct *curr_task;
	struct mm_struct *curr_mm;
	u32 saved_tid = info->tid;

	/* If this function returns a non-NULL vma, it holds a reference to
	 * the task_struct, holds a refcount on mm->mm_users, and holds
	 * read lock on vma->mm->mmap_lock.
	 * If this function returns NULL, it does not hold any reference or
	 * lock.
	 */
	if (info->task) {
		curr_task = info->task;
		curr_vma = info->vma;
		curr_mm = info->mm;
		/* In case of lock contention, drop mmap_lock to unblock
		 * the writer.
		 *
		 * After relock, call find(mm, prev_vm_end - 1) to find
		 * new vma to process.
		 *
		 * +------+------+-----------+
		 * | VMA1 | VMA2 | VMA3      |
		 * +------+------+-----------+
		 * |      |      |           |
		 * 4k     8k     16k         400k
		 *
		 * For example, curr_vma == VMA2. Before unlock, we set
		 *
		 *    prev_vm_start = 8k
		 *    prev_vm_end   = 16k
		 *
		 * There are a few cases:
		 *
		 * 1) VMA2 is freed, but VMA3 exists.
		 *
		 *    find_vma() will return VMA3, just process VMA3.
		 *
		 * 2) VMA2 still exists.
		 *
		 *    find_vma() will return VMA2, process VMA2->next.
		 *
		 * 3) no more vma in this mm.
		 *
		 *    Process the next task.
		 *
		 * 4) find_vma() returns a different vma, VMA2'.
		 *
		 *    4.1) If VMA2 covers the same range as VMA2', skip VMA2',
		 *         because we already covered the range;
		 *    4.2) VMA2 and VMA2' cover different ranges, process
		 *         VMA2'.
		 */
		if (mmap_lock_is_contended(curr_mm)) {
			info->prev_vm_start = curr_vma->vm_start;
			info->prev_vm_end = curr_vma->vm_end;
			op = task_vma_iter_find_vma;
			mmap_read_unlock(curr_mm);
			if (mmap_read_lock_killable(curr_mm)) {
				mmput(curr_mm);
				goto finish;
			}
		} else {
			op = task_vma_iter_next_vma;
		}
	} else {
again:
		curr_task = task_seq_get_next(&info->common, &info->tid, true);
		if (!curr_task) {
			info->tid++;
			goto finish;
		}

		if (saved_tid != info->tid) {
			/* new task, process the first vma */
			op = task_vma_iter_first_vma;
		} else {
			/* Found the same tid, which means the user space
			 * finished data in the previous buffer and read more.
			 * We dropped mmap_lock before returning to user
			 * space, so it is necessary to use find_vma() to
			 * find the next vma to process.
			 */
			op = task_vma_iter_find_vma;
		}

		curr_mm = get_task_mm(curr_task);
		if (!curr_mm)
			goto next_task;

		if (mmap_read_lock_killable(curr_mm)) {
			mmput(curr_mm);
			goto finish;
		}
	}

	switch (op) {
	case task_vma_iter_first_vma:
		curr_vma = find_vma(curr_mm, 0);
		break;
	case task_vma_iter_next_vma:
		curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	case task_vma_iter_find_vma:
		/* We dropped mmap_lock so it is necessary to use find_vma
		 * to find the next vma. This is similar to the mechanism
		 * in show_smaps_rollup().
		 */
		curr_vma = find_vma(curr_mm, info->prev_vm_end - 1);
		/* case 1) and 4.2) above just use curr_vma */

		/* check for case 2) or case 4.1) above */
		if (curr_vma &&
		    curr_vma->vm_start == info->prev_vm_start &&
		    curr_vma->vm_end == info->prev_vm_end)
			curr_vma = find_vma(curr_mm, curr_vma->vm_end);
		break;
	}
	if (!curr_vma) {
		/* case 3) above, or case 2) 4.1) with vma->next == NULL */
		mmap_read_unlock(curr_mm);
		mmput(curr_mm);
		goto next_task;
	}
	info->task = curr_task;
	info->vma = curr_vma;
	info->mm = curr_mm;
	return curr_vma;

next_task:
	if (info->common.type == BPF_TASK_ITER_TID)
		goto finish;

	put_task_struct(curr_task);
	info->task = NULL;
	info->mm = NULL;
	info->tid++;
	goto again;

finish:
	if (curr_task)
		put_task_struct(curr_task);
	info->task = NULL;
	info->vma = NULL;
	info->mm = NULL;
	return NULL;
}

static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct vm_area_struct *vma;

	vma = task_vma_seq_get_next(info);
	if (vma && *pos == 0)
		++*pos;

	return vma;
}

static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	++*pos;
	return task_vma_seq_get_next(info);
}

struct bpf_iter__task_vma {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct task_struct *, task);
	__bpf_md_ptr(struct vm_area_struct *, vma);
};

DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
		     struct task_struct *task, struct vm_area_struct *vma)

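/* Illustrative sketch of an "iter/task_vma" consumer (not part of this
 * file); ctx->vma is NULL on the final call of a dump:
 *
 *	SEC("iter/task_vma")
 *	int dump_task_vma(struct bpf_iter__task_vma *ctx)
 *	{
 *		if (ctx->vma)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%lx-%lx\n",
 *				       ctx->vma->vm_start, ctx->vma->vm_end);
 *		return 0;
 *	}
 */
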
static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;
	struct bpf_iter__task_vma ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.task = info->task;
	ctx.vma = info->vma;
	return bpf_iter_run_prog(prog, &ctx);
}

static int task_vma_seq_show(struct seq_file *seq, void *v)
{
	return __task_vma_seq_show(seq, false);
}

static void task_vma_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_task_vma_info *info = seq->private;

	if (!v) {
		(void)__task_vma_seq_show(seq, true);
	} else {
		/* info->vma has not been seen by the BPF program. If the
		 * user space reads more, task_vma_seq_get_next should
		 * return this vma again. Set prev_vm_start to ~0UL,
		 * so that we don't skip the vma returned by the next
		 * find_vma() (case task_vma_iter_find_vma in
		 * task_vma_seq_get_next()).
		 */
		info->prev_vm_start = ~0UL;
		info->prev_vm_end = info->vma->vm_end;
		mmap_read_unlock(info->mm);
		mmput(info->mm);
		info->mm = NULL;
		put_task_struct(info->task);
		info->task = NULL;
	}
}

static const struct seq_operations task_vma_seq_ops = {
	.start	= task_vma_seq_start,
	.next	= task_vma_seq_next,
	.stop	= task_vma_seq_stop,
	.show	= task_vma_seq_show,
};

static const struct bpf_iter_seq_info task_seq_info = {
	.seq_ops		= &task_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_info),
};

static int bpf_iter_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info)
{
	switch (aux->task.type) {
	case BPF_TASK_ITER_TID:
		info->iter.task.tid = aux->task.pid;
		break;
	case BPF_TASK_ITER_TGID:
		info->iter.task.pid = aux->task.pid;
		break;
	default:
		break;
	}
	return 0;
}

static void bpf_iter_task_show_fdinfo(const struct bpf_iter_aux_info *aux, struct seq_file *seq)
{
	seq_printf(seq, "task_type:\t%s\n", iter_task_type_names[aux->task.type]);
	if (aux->task.type == BPF_TASK_ITER_TID)
		seq_printf(seq, "tid:\t%u\n", aux->task.pid);
	else if (aux->task.type == BPF_TASK_ITER_TGID)
		seq_printf(seq, "pid:\t%u\n", aux->task.pid);
}

static struct bpf_iter_reg task_reg_info = {
	.target			= "task",
	.attach_target		= bpf_iter_attach_task,
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task, task),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_seq_info,
	.fill_link_info		= bpf_iter_fill_link_info,
	.show_fdinfo		= bpf_iter_task_show_fdinfo,
};

static const struct bpf_iter_seq_info task_file_seq_info = {
	.seq_ops		= &task_file_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_file_info),
};

static struct bpf_iter_reg task_file_reg_info = {
	.target			= "task_file",
	.attach_target		= bpf_iter_attach_task,
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task_file, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_file, file),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_file_seq_info,
	.fill_link_info		= bpf_iter_fill_link_info,
	.show_fdinfo		= bpf_iter_task_show_fdinfo,
};

static const struct bpf_iter_seq_info task_vma_seq_info = {
	.seq_ops		= &task_vma_seq_ops,
	.init_seq_private	= init_seq_pidns,
	.fini_seq_private	= fini_seq_pidns,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_task_vma_info),
};

static struct bpf_iter_reg task_vma_reg_info = {
	.target			= "task_vma",
	.attach_target		= bpf_iter_attach_task,
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__task_vma, task),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__task_vma, vma),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &task_vma_seq_info,
	.fill_link_info		= bpf_iter_fill_link_info,
	.show_fdinfo		= bpf_iter_task_show_fdinfo,
};

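/* bpf_find_vma(task, addr, callback_fn, callback_ctx, flags): find the
 * vma of @task that contains @addr and run @callback_fn on it.  The
 * helper may run in contexts that cannot sleep, so mmap_lock is only
 * trylock'ed and -EBUSY is returned on contention.  An illustrative
 * BPF-side use (callback signature per the helper's UAPI doc):
 *
 *	static long check_vma(struct task_struct *task,
 *			      struct vm_area_struct *vma, void *data)
 *	{
 *		return 0;
 *	}
 *	...
 *	ret = bpf_find_vma(task, addr, check_vma, &data, 0);
 */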
BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
	   bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
{
	struct mmap_unlock_irq_work *work = NULL;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct mm_struct *mm;
	int ret = -ENOENT;

	if (flags)
		return -EINVAL;

	if (!task)
		return -ENOENT;

	mm = task->mm;
	if (!mm)
		return -ENOENT;

	irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);

	if (irq_work_busy || !mmap_read_trylock(mm))
		return -EBUSY;

	vma = find_vma(mm, start);

	if (vma && vma->vm_start <= start && vma->vm_end > start) {
		callback_fn((u64)(long)task, (u64)(long)vma,
			    (u64)(long)callback_ctx, 0, 0);
		ret = 0;
	}
	bpf_mmap_unlock_mm(work, mm);
	return ret;
}

const struct bpf_func_proto bpf_find_vma_proto = {
	.func		= bpf_find_vma,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_FUNC,
	.arg4_type	= ARG_PTR_TO_STACK_OR_NULL,
	.arg5_type	= ARG_ANYTHING,
};

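/* bpf_find_vma() may run in a context (e.g. NMI) that cannot release
 * mmap_lock directly.  bpf_mmap_unlock_mm() then defers the unlock to
 * this per-cpu irq_work, whose handler must use
 * mmap_read_unlock_non_owner() because it runs outside the context of
 * the task that took the lock.
 */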
DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);

static void do_mmap_read_unlock(struct irq_work *entry)
{
	struct mmap_unlock_irq_work *work;

	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
	mmap_read_unlock_non_owner(work->mm);
}

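/* Register the three iterator targets.  The BTF ids of the context
 * arguments are only resolvable at runtime, so they are filled in here
 * rather than in the static bpf_iter_reg definitions above.
 */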
static int __init task_iter_init(void)
{
	struct mmap_unlock_irq_work *work;
	int ret, cpu;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&mmap_unlock_work, cpu);
		init_irq_work(&work->irq_work, do_mmap_read_unlock);
	}

	task_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	ret = bpf_iter_reg_target(&task_reg_info);
	if (ret)
		return ret;

	task_file_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_file_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_FILE];
	ret = bpf_iter_reg_target(&task_file_reg_info);
	if (ret)
		return ret;

	task_vma_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
	task_vma_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
	return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);