// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK                                  \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |        \
         BPF_F_STACK_BUILD_ID)

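/*
 * A stack map is a hash table of stack traces: each bucket stores one
 * trace, keyed by the 32-bit jhash of its instruction pointers. With
 * BPF_F_STACK_BUILD_ID, data[] holds struct bpf_stack_build_id entries
 * instead of raw u64 instruction pointers.
 */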
struct stack_map_bucket {
        struct pcpu_freelist_node fnode;
        u32 hash;
        u32 nr;
        u64 data[];
};

struct bpf_stack_map {
        struct bpf_map map;
        void *elems;
        struct pcpu_freelist freelist;
        u32 n_buckets;
        struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
        struct irq_work irq_work;
        struct mm_struct *mm;
};

static void do_up_read(struct irq_work *entry)
{
        struct stack_map_irq_work *work;

        if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
                return;

        work = container_of(entry, struct stack_map_irq_work, irq_work);
        mmap_read_unlock_non_owner(work->mm);
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
        return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
        return stack_map_use_build_id(map) ?
                sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
        u64 elem_size = sizeof(struct stack_map_bucket) +
                        (u64)smap->map.value_size;
        int err;

        /* elem_size is u64 so elem_size * max_entries cannot overflow u32 */
        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
                                         smap->map.numa_node);
        if (!smap->elems)
                return -ENOMEM;

        err = pcpu_freelist_init(&smap->freelist);
        if (err)
                goto free_elems;

        pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
                               smap->map.max_entries);
        return 0;

free_elems:
        bpf_map_area_free(smap->elems);
        return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
        u32 value_size = attr->value_size;
        struct bpf_stack_map *smap;
        u64 cost, n_buckets;
        int err;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            value_size < 8 || value_size % 8)
                return ERR_PTR(-EINVAL);

        BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
        if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
                if (value_size % sizeof(struct bpf_stack_build_id) ||
                    value_size / sizeof(struct bpf_stack_build_id)
                    > sysctl_perf_event_max_stack)
                        return ERR_PTR(-EINVAL);
        } else if (value_size / 8 > sysctl_perf_event_max_stack)
                return ERR_PTR(-EINVAL);

        /* hash table size must be a power of 2; the rounding can
         * overflow to zero for very large max_entries
         */
        n_buckets = roundup_pow_of_two(attr->max_entries);
        if (!n_buckets)
                return ERR_PTR(-E2BIG);

        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
        smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
        if (!smap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&smap->map, attr);
        smap->map.value_size = value_size;
        smap->n_buckets = n_buckets;

        err = get_callchain_buffers(sysctl_perf_event_max_stack);
        if (err)
                goto free_smap;

        err = prealloc_elems_and_freelist(smap);
        if (err)
                goto put_buffers;

        return &smap->map;

put_buffers:
        put_callchain_buffers();
free_smap:
        bpf_map_area_free(smap);
        return ERR_PTR(err);
}

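/*
 * User-space creation sketch (illustrative, not part of this file):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_STACK_TRACE,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = PERF_MAX_STACK_DEPTH * sizeof(__u64),
 *		.max_entries = 10000,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
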
#define BPF_BUILD_ID 3
/*
 * Parse the build ID from the note segment. This logic can be shared
 * between 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr
 * are identical.
 */
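/*
 * Note layout (sketch): each note is an Elf32_Nhdr followed by a
 * 4-byte-aligned name and a 4-byte-aligned descriptor. For the GNU
 * build ID note the name is "GNU\0" (n_namesz == 4), n_type is 3
 * (NT_GNU_BUILD_ID), and the descriptor holds the build ID bytes
 * (20 for SHA-1, at most BPF_BUILD_ID_SIZE here).
 */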
static inline int stack_map_parse_build_id(void *page_addr,
                                           unsigned char *build_id,
                                           void *note_start,
                                           Elf32_Word note_size)
{
        Elf32_Word note_offs = 0, new_offs;

        /* check for overflow */
        if (note_start < page_addr || note_start + note_size < note_start)
                return -EINVAL;

        /* only supports notes that fit in the first page */
        if (note_start + note_size > page_addr + PAGE_SIZE)
                return -EINVAL;

        while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
                Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

                if (nhdr->n_type == BPF_BUILD_ID &&
                    nhdr->n_namesz == sizeof("GNU") &&
                    nhdr->n_descsz > 0 &&
                    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
                        memcpy(build_id,
                               note_start + note_offs +
                               ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
                               nhdr->n_descsz);
                        memset(build_id + nhdr->n_descsz, 0,
                               BPF_BUILD_ID_SIZE - nhdr->n_descsz);
                        return 0;
                }
                new_offs = note_offs + sizeof(Elf32_Nhdr) +
                        ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
                if (new_offs <= note_offs)  /* overflow */
                        break;
                note_offs = new_offs;
        }
        return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
                                     unsigned char *build_id)
{
        Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
        Elf32_Phdr *phdr;
        int i;

        /* only supports phdrs that fit in the first page */
        if (ehdr->e_phnum >
            (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
                return -EINVAL;

        phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

        for (i = 0; i < ehdr->e_phnum; ++i) {
                if (phdr[i].p_type == PT_NOTE &&
                    !stack_map_parse_build_id(page_addr, build_id,
                                              page_addr + phdr[i].p_offset,
                                              phdr[i].p_filesz))
                        return 0;
        }
        return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
                                     unsigned char *build_id)
{
        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
        Elf64_Phdr *phdr;
        int i;

        /* only supports phdrs that fit in the first page */
        if (ehdr->e_phnum >
            (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
                return -EINVAL;

        phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

        for (i = 0; i < ehdr->e_phnum; ++i) {
                if (phdr[i].p_type == PT_NOTE &&
                    !stack_map_parse_build_id(page_addr, build_id,
                                              page_addr + phdr[i].p_offset,
                                              phdr[i].p_filesz))
                        return 0;
        }
        return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
                                  unsigned char *build_id)
{
        Elf32_Ehdr *ehdr;
        struct page *page;
        void *page_addr;
        int ret;

        /* only works for page backed storage */
        if (!vma->vm_file)
                return -EINVAL;

        page = find_get_page(vma->vm_file->f_mapping, 0);
        if (!page)
                return -EFAULT; /* page not mapped */

        ret = -EINVAL;
        page_addr = kmap_atomic(page);
        ehdr = (Elf32_Ehdr *)page_addr;

        /* compare the ELF magic "\x7fELF" */
        if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* only support executable files and shared object files */
        if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
                goto out;

        if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
                ret = stack_map_get_build_id_32(page_addr, build_id);
        else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
                ret = stack_map_get_build_id_64(page_addr, build_id);
out:
        kunmap_atomic(page_addr);
        put_page(page);
        return ret;
}

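/*
 * Translate each address in ips[] into a (build_id, offset) pair by
 * walking the vmas of current->mm; entries that cannot be resolved
 * fall back to the raw ip.
 */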
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
                                          u64 *ips, u32 trace_nr, bool user)
{
        int i;
        struct vm_area_struct *vma;
        bool irq_work_busy = false;
        struct stack_map_irq_work *work = NULL;

        if (irqs_disabled()) {
                if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
                        work = this_cpu_ptr(&up_read_work);
                        if (irq_work_is_busy(&work->irq_work)) {
                                /* cannot queue more up_read, fall back */
                                irq_work_busy = true;
                        }
                } else {
                        /*
                         * PREEMPT_RT does not allow trylocking the mmap
                         * lock from interrupt-disabled context. Force the
                         * fallback code.
                         */
                        irq_work_busy = true;
                }
        }

        /*
         * We cannot do up_read() when irqs are disabled, because of the
         * risk of deadlocking on rq_lock. To do the build_id lookup with
         * irqs disabled, we instead run up_read() from a per-cpu
         * irq_work. If that irq_work is already in use by another lookup,
         * we fall back to reporting raw ips.
         *
         * The same fallback is used for kernel stacks (!user) on a
         * stackmap with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
            !mmap_read_trylock_non_owner(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
                }
                return;
        }

        for (i = 0; i < trace_nr; i++) {
                vma = find_vma(current->mm, ips[i]);
                if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
                        /* per-entry fallback to ips */
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
                        continue;
                }
                id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
                        - vma->vm_start;
                id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
        }

        if (!work) {
                mmap_read_unlock_non_owner(current->mm);
        } else {
                work->mm = current->mm;
                irq_work_queue(&work->irq_work);
        }
}

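/*
 * Collect the stack trace of an arbitrary task (not necessarily
 * current) into a callchain entry via stack_trace_save_tsk(); returns
 * NULL without CONFIG_STACKTRACE.
 */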
static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
{
#ifdef CONFIG_STACKTRACE
        struct perf_callchain_entry *entry;
        int rctx;

        entry = get_callchain_entry(&rctx);

        if (!entry)
                return NULL;

        entry->nr = init_nr +
                stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
                                     sysctl_perf_event_max_stack - init_nr, 0);

        /* stack_trace_save_tsk() works on an unsigned long array, while
         * perf_callchain_entry uses a u64 array. On 32-bit systems the
         * entries must be widened to fix this mismatch.
         */
        if (__BITS_PER_LONG != 64) {
                unsigned long *from = (unsigned long *) entry->ip;
                u64 *to = entry->ip;
                int i;

                /* copy data from the end to avoid using an extra buffer */
                for (i = entry->nr - 1; i >= (int)init_nr; i--)
                        to[i] = (u64)(from[i]);
        }

        put_callchain_entry(rctx);

        return entry;
#else /* CONFIG_STACKTRACE */
        return NULL;
#endif
}

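/*
 * Hash the trace and store it in its bucket. Returns the bucket id on
 * success, -EEXIST if the bucket already holds a different trace and
 * BPF_F_REUSE_STACKID is not set, -ENOMEM if no free bucket is
 * available, or -EFAULT if the skip count covers the whole trace.
 */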
static long __bpf_get_stackid(struct bpf_map *map,
                              struct perf_callchain_entry *trace, u64 flags)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
        u32 max_depth = map->value_size / stack_map_data_size(map);
        /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
        u32 init_nr = sysctl_perf_event_max_stack - max_depth;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        u32 hash, id, trace_nr, trace_len;
        bool user = flags & BPF_F_USER_STACK;
        u64 *ips;
        bool hash_matches;

        /* get_perf_callchain() guarantees that trace->nr >= init_nr
         * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
         */
        trace_nr = trace->nr - init_nr;

        if (trace_nr <= skip)
                /* skipping more than usable stack trace */
                return -EFAULT;

        trace_nr -= skip;
        trace_len = trace_nr * sizeof(u64);
        ips = trace->ip + skip + init_nr;
        hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
        id = hash & (smap->n_buckets - 1);
        bucket = READ_ONCE(smap->buckets[id]);

        hash_matches = bucket && bucket->hash == hash;
        /* fast cmp */
        if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
                return id;

        if (stack_map_use_build_id(map)) {
                /* for build_id+offset, pop a bucket before the slow cmp */
                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                new_bucket->nr = trace_nr;
                stack_map_get_build_id_offset(
                        (struct bpf_stack_build_id *)new_bucket->data,
                        ips, trace_nr, user);
                trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return id;
                }
                if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return -EEXIST;
                }
        } else {
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, ips, trace_len) == 0)
                        return id;
                if (bucket && !(flags & BPF_F_REUSE_STACKID))
                        return -EEXIST;

                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                memcpy(new_bucket->data, ips, trace_len);
        }

        new_bucket->hash = hash;
        new_bucket->nr = trace_nr;

        old_bucket = xchg(&smap->buckets[id], new_bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return id;
}

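/*
 * BPF-side usage sketch (illustrative, not part of this file): a
 * program typically calls
 *
 *	int key = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
 *
 * and, if key >= 0, uses it as a map key; user space later reads the
 * stored trace back through BPF_MAP_LOOKUP_ELEM (bpf_stackmap_copy()).
 */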
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags)
{
        u32 max_depth = map->value_size / stack_map_data_size(map);
        /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
        u32 init_nr = sysctl_perf_event_max_stack - max_depth;
        bool user = flags & BPF_F_USER_STACK;
        struct perf_callchain_entry *trace;
        bool kernel = !user;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;

        trace = get_perf_callchain(regs, init_nr, kernel, user,
                                   sysctl_perf_event_max_stack, false, false);

        if (unlikely(!trace))
                /* couldn't fetch the stack trace */
                return -EFAULT;

        return __bpf_get_stackid(map, trace, flags);
}

const struct bpf_func_proto bpf_get_stackid_proto = {
        .func           = bpf_get_stackid,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};

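/*
 * Count the leading kernel frames: in a mixed perf callchain, the user
 * frames follow the PERF_CONTEXT_USER marker.
 */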
static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
{
        __u64 nr_kernel = 0;

        while (nr_kernel < trace->nr) {
                if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
                        break;
                nr_kernel++;
        }
        return nr_kernel;
}

BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
           struct bpf_map *, map, u64, flags)
{
        struct perf_event *event = ctx->event;
        struct perf_callchain_entry *trace;
        bool kernel, user;
        __u64 nr_kernel;
        int ret;

        /* perf_sample_data doesn't have a callchain, use bpf_get_stackid */
        if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
                return bpf_get_stackid((unsigned long)(ctx->regs),
                                       (unsigned long) map, flags, 0, 0);

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;

        user = flags & BPF_F_USER_STACK;
        kernel = !user;

        trace = ctx->data->callchain;
        if (unlikely(!trace))
                return -EFAULT;

        nr_kernel = count_kernel_ip(trace);

        if (kernel) {
                __u64 nr = trace->nr;

                trace->nr = nr_kernel;
                ret = __bpf_get_stackid(map, trace, flags);

                /* restore nr */
                trace->nr = nr;
        } else { /* user */
                u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

                skip += nr_kernel;
                if (skip > BPF_F_SKIP_FIELD_MASK)
                        return -EFAULT;

                flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
                ret = __bpf_get_stackid(map, trace, flags);
        }
        return ret;
}

const struct bpf_func_proto bpf_get_stackid_proto_pe = {
        .func           = bpf_get_stackid_pe,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};

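/*
 * Common implementation behind bpf_get_stack() and friends: copy a
 * kernel or user stack trace into buf, taking it from a pre-collected
 * callchain (trace_in), from a given task, or from the current context
 * via get_perf_callchain(). Returns the number of bytes copied or a
 * negative error.
 */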
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                            struct perf_callchain_entry *trace_in,
                            void *buf, u32 size, u64 flags)
{
        u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
        bool user_build_id = flags & BPF_F_USER_BUILD_ID;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        bool user = flags & BPF_F_USER_STACK;
        struct perf_callchain_entry *trace;
        bool kernel = !user;
        int err = -EINVAL;
        u64 *ips;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_USER_BUILD_ID)))
                goto clear;
        if (kernel && user_build_id)
                goto clear;

        elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
                                            : sizeof(u64);
        if (unlikely(size % elem_size))
                goto clear;

        /* cannot get a valid user stack for a task without user_mode regs */
        if (task && user && !user_mode(regs))
                goto err_fault;

        num_elem = size / elem_size;
        if (sysctl_perf_event_max_stack < num_elem)
                init_nr = 0;
        else
                init_nr = sysctl_perf_event_max_stack - num_elem;

        if (trace_in)
                trace = trace_in;
        else if (kernel && task)
                trace = get_callchain_entry_for_task(task, init_nr);
        else
                trace = get_perf_callchain(regs, init_nr, kernel, user,
                                           sysctl_perf_event_max_stack,
                                           false, false);
        if (unlikely(!trace))
                goto err_fault;

        trace_nr = trace->nr - init_nr;
        if (trace_nr < skip)
                goto err_fault;

        trace_nr -= skip;
        trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
        copy_len = trace_nr * elem_size;
        ips = trace->ip + skip + init_nr;
        if (user && user_build_id)
                stack_map_get_build_id_offset(buf, ips, trace_nr, user);
        else
                memcpy(buf, ips, copy_len);

        if (size > copy_len)
                memset(buf + copy_len, 0, size - copy_len);
        return copy_len;

err_fault:
        err = -EFAULT;
clear:
        memset(buf, 0, size);
        return err;
}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
           u64, flags)
{
        return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_stack_proto = {
        .func           = bpf_get_stack,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

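/*
 * Unlike bpf_get_stackid(), the bpf_get_stack() family copies the trace
 * into a program-supplied buffer. Usage sketch (illustrative):
 *
 *	__u64 ips[32];
 *	long n = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
 *
 * n is the number of bytes written on success (here at most 32 frames)
 * or a negative error.
 */
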
BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
           u32, size, u64, flags)
{
        struct pt_regs *regs = task_pt_regs(task);

        return __bpf_get_stack(regs, task, NULL, buf, size, flags);
}

BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)

const struct bpf_func_proto bpf_get_task_stack_proto = {
        .func           = bpf_get_task_stack,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg1_btf_id    = &bpf_get_task_stack_btf_ids[0],
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
           void *, buf, u32, size, u64, flags)
{
        struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
        struct perf_event *event = ctx->event;
        struct perf_callchain_entry *trace;
        bool kernel, user;
        int err = -EINVAL;
        __u64 nr_kernel;

        if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
                return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_USER_BUILD_ID)))
                goto clear;

        user = flags & BPF_F_USER_STACK;
        kernel = !user;

        err = -EFAULT;
        trace = ctx->data->callchain;
        if (unlikely(!trace))
                goto clear;

        nr_kernel = count_kernel_ip(trace);

        if (kernel) {
                __u64 nr = trace->nr;

                trace->nr = nr_kernel;
                err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);

                /* restore nr */
                trace->nr = nr;
        } else { /* user */
                u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

                skip += nr_kernel;
                if (skip > BPF_F_SKIP_FIELD_MASK)
                        goto clear;

                flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
                err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
        }
        return err;

clear:
        memset(buf, 0, size);
        return err;
}

const struct bpf_func_proto bpf_get_stack_proto_pe = {
        .func           = bpf_get_stack_pe,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *old_bucket;
        u32 id = *(u32 *)key, trace_len;

        if (unlikely(id >= smap->n_buckets))
                return -ENOENT;

        bucket = xchg(&smap->buckets[id], NULL);
        if (!bucket)
                return -ENOENT;

        trace_len = bucket->nr * stack_map_data_size(map);
        memcpy(value, bucket->data, trace_len);
        memset(value + trace_len, 0, map->value_size - trace_len);

        old_bucket = xchg(&smap->buckets[id], bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
}

static int stack_map_get_next_key(struct bpf_map *map, void *key,
                                  void *next_key)
{
        struct bpf_stack_map *smap = container_of(map,
                                                  struct bpf_stack_map, map);
        u32 id;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!key) {
                id = 0;
        } else {
                id = *(u32 *)key;
                if (id >= smap->n_buckets || !smap->buckets[id])
                        id = 0;
                else
                        id++;
        }

        while (id < smap->n_buckets && !smap->buckets[id])
                id++;

        if (id >= smap->n_buckets)
                return -ENOENT;

        *(u32 *)next_key = id;
        return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *old_bucket;
        u32 id = *(u32 *)key;

        if (unlikely(id >= smap->n_buckets))
                return -E2BIG;

        old_bucket = xchg(&smap->buckets[id], NULL);
        if (old_bucket) {
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
                return 0;
        } else {
                return -ENOENT;
        }
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

        bpf_map_area_free(smap->elems);
        pcpu_freelist_destroy(&smap->freelist);
        bpf_map_area_free(smap);
        put_callchain_buffers();
}

static int stack_trace_map_btf_id;
const struct bpf_map_ops stack_trace_map_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc = stack_map_alloc,
        .map_free = stack_map_free,
        .map_get_next_key = stack_map_get_next_key,
        .map_lookup_elem = stack_map_lookup_elem,
        .map_update_elem = stack_map_update_elem,
        .map_delete_elem = stack_map_delete_elem,
        .map_check_btf = map_check_no_btf,
        .map_btf_name = "bpf_stack_map",
        .map_btf_id = &stack_trace_map_btf_id,
};

static int __init stack_map_init(void)
{
        int cpu;
        struct stack_map_irq_work *work;

        for_each_possible_cpu(cpu) {
                work = per_cpu_ptr(&up_read_work, cpu);
                init_irq_work(&work->irq_work, do_up_read);
        }
        return 0;
}
subsys_initcall(stack_map_init);