fs/proc/task_mmu.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define SEQ_PUT_DEC(str, val) \
                seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
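/*
 * Emit the memory-accounting fields of /proc/pid/status: VmPeak through
 * VmSwap, plus hugetlb usage via hugetlb_report_usage().
 */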
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        unsigned long text, lib, swap, anon, file, shmem;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        anon = get_mm_counter(mm, MM_ANONPAGES);
        file = get_mm_counter(mm, MM_FILEPAGES);
        shmem = get_mm_counter(mm, MM_SHMEMPAGES);

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = anon + file + shmem;
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        /* split executable areas between text and lib */
        text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
        text = min(text, mm->exec_vm << PAGE_SHIFT);
        lib = (mm->exec_vm << PAGE_SHIFT) - text;

        swap = get_mm_counter(mm, MM_SWAPENTS);
        SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
        SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
        SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
        SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
        SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
        SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
        SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
        SEQ_PUT_DEC(" kB\nRssFile:\t", file);
        SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
        SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
        SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
        seq_put_decimal_ull_width(m,
                    " kB\nVmExe:\t", text >> 10, 8);
        seq_put_decimal_ull_width(m,
                    " kB\nVmLib:\t", lib >> 10, 8);
        seq_put_decimal_ull_width(m,
                    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
        SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
        seq_puts(m, " kB\n");
        hugetlb_report_usage(m, mm);
}
#undef SEQ_PUT_DEC

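/* Virtual memory size in bytes, as reported in /proc/pid/stat. */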
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
        *shared = get_mm_counter(mm, MM_FILEPAGES) +
                        get_mm_counter(mm, MM_SHMEMPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->data_vm + mm->stack_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
        return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
        struct task_struct *task = priv->task;

        task_lock(task);
        priv->task_mempolicy = get_task_policy(task);
        mpol_get(priv->task_mempolicy);
        task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
        mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

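/*
 * Undo what m_start() set up: drop the mempolicy reference, mmap_sem and
 * the mm_struct user reference.
 */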
static void vma_stop(struct proc_maps_private *priv)
{
        struct mm_struct *mm = priv->mm;

        release_task_mempolicy(priv);
        up_read(&mm->mmap_sem);
        mmput(mm);
}

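/*
 * Return the VMA following @vma, falling back to the gate VMA (tail_vma)
 * after the last list entry; NULL terminates the walk.
 */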
static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma == priv->tail_vma)
                return NULL;
        return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
        if (m->count < m->size) /* vma is copied successfully */
                m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}

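/*
 * seq_file ->start(): resume from the address cached in m->version when
 * possible, otherwise walk the VMA list up to the requested position.
 */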
static void *m_start(struct seq_file *m, loff_t *ppos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned int pos = *ppos;

        /* See m_cache_vma(). Zero at the start or after lseek. */
        if (last_addr == -1UL)
                return NULL;

        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
                return ERR_PTR(-ESRCH);

        mm = priv->mm;
        if (!mm || !mmget_not_zero(mm))
                return NULL;

        down_read(&mm->mmap_sem);
        hold_task_mempolicy(priv);
        priv->tail_vma = get_gate_vma(mm);

        if (last_addr) {
                vma = find_vma(mm, last_addr - 1);
                if (vma && vma->vm_start <= last_addr)
                        vma = m_next_vma(priv, vma);
                if (vma)
                        return vma;
        }

        m->version = 0;
        if (pos < mm->map_count) {
                for (vma = mm->mmap; pos; pos--) {
                        m->version = vma->vm_start;
                        vma = vma->vm_next;
                }
                return vma;
        }

        /* we do not bother to update m->version in this case */
        if (pos == mm->map_count && priv->tail_vma)
                return priv->tail_vma;

        vma_stop(priv);
        return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *next;

        (*pos)++;
        next = m_next_vma(priv, v);
        if (!next)
                vma_stop(priv);
        return next;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;

        if (!IS_ERR_OR_NULL(v))
                vma_stop(priv);
        if (priv->task) {
                put_task_struct(priv->task);
                priv->task = NULL;
        }
}

static int proc_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops, int psize)
{
        struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

        if (!priv)
                return -ENOMEM;

        priv->inode = inode;
        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                int err = PTR_ERR(priv->mm);

                seq_release_private(inode, file);
                return err;
        }

        return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        if (priv->mm)
                mmdrop(priv->mm);

        return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
{
        return proc_maps_open(inode, file, ops,
                                sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct vm_area_struct *vma)
{
        /*
         * We make no effort to guess what a given thread considers to be
         * its "stack".  It's not even well-defined for programs written
         * in languages like Go.
         */
        return vma->vm_start <= vma->vm_mm->start_stack &&
                vma->vm_end >= vma->vm_mm->start_stack;
}

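/*
 * Emit the fixed-width prefix of a maps line, e.g.
 * "00400000-00452000 r-xp 00000000 08:02 173521 ".
 */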
static void show_vma_header_prefix(struct seq_file *m,
                                   unsigned long start, unsigned long end,
                                   vm_flags_t flags, unsigned long long pgoff,
                                   dev_t dev, unsigned long ino)
{
        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_put_hex_ll(m, NULL, start, 8);
        seq_put_hex_ll(m, "-", end, 8);
        seq_putc(m, ' ');
        seq_putc(m, flags & VM_READ ? 'r' : '-');
        seq_putc(m, flags & VM_WRITE ? 'w' : '-');
        seq_putc(m, flags & VM_EXEC ? 'x' : '-');
        seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
        seq_put_hex_ll(m, " ", pgoff, 8);
        seq_put_hex_ll(m, " ", MAJOR(dev), 2);
        seq_put_hex_ll(m, ":", MINOR(dev), 2);
        seq_put_decimal_ull(m, " ", ino);
        seq_putc(m, ' ');
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        vm_flags_t flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
        unsigned long start, end;
        dev_t dev = 0;
        const char *name = NULL;

        if (file) {
                struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
        }

        start = vma->vm_start;
        end = vma->vm_end;
        show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "\n");
                goto done;
        }

        if (vma->vm_ops && vma->vm_ops->name) {
                name = vma->vm_ops->name(vma);
                if (name)
                        goto done;
        }

        name = arch_vma_name(vma);
        if (!name) {
                if (!mm) {
                        name = "[vdso]";
                        goto done;
                }

                if (vma->vm_start <= mm->brk &&
                    vma->vm_end >= mm->start_brk) {
                        name = "[heap]";
                        goto done;
                }

                if (is_stack(vma))
                        name = "[stack]";
        }

done:
        if (name) {
                seq_pad(m, ' ');
                seq_puts(m, name);
        }
        seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
        show_map_vma(m, v);
        m_cache_vma(m, v);
        return 0;
}

static const struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
        .open           = pid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *      - 1M 3-user-pages add up to 8KB errors;
 *      - supports mapcount up to 2^24, or 16M;
 *      - supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
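/*
 * Worked example: with 4K pages, a page shared by 3 processes adds
 * (4096 << 12) / 3 = 5592405 to pss; shifted back down that reads as
 * 1365 bytes against a true share of 1365.33, i.e. truncation costs
 * less than one byte per page.
 */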

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
        unsigned long lazyfree;
        unsigned long anonymous_thp;
        unsigned long shmem_thp;
        unsigned long swap;
        unsigned long shared_hugetlb;
        unsigned long private_hugetlb;
        u64 pss;
        u64 pss_locked;
        u64 swap_pss;
        bool check_shmem_swap;
};

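/*
 * Fold one mapped page (or compound page) into @mss: bump resident and
 * the shared/private clean/dirty buckets, and add its mapcount-weighted
 * share to the fixed-point pss counters.
 */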
static void smaps_account(struct mem_size_stats *mss, struct page *page,
                bool compound, bool young, bool dirty, bool locked)
{
        int i, nr = compound ? 1 << compound_order(page) : 1;
        unsigned long size = nr * PAGE_SIZE;

        if (PageAnon(page)) {
                mss->anonymous += size;
                if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
                        mss->lazyfree += size;
        }

        mss->resident += size;
        /* Accumulate the size in pages that have been accessed. */
        if (young || page_is_young(page) || PageReferenced(page))
                mss->referenced += size;

        /*
         * page_count(page) == 1 guarantees the page is mapped exactly once.
         * If any subpage of the compound page were mapped with a PTE, it
         * would elevate page_count().
         */
        if (page_count(page) == 1) {
                if (dirty || PageDirty(page))
                        mss->private_dirty += size;
                else
                        mss->private_clean += size;
                mss->pss += (u64)size << PSS_SHIFT;
                if (locked)
                        mss->pss_locked += (u64)size << PSS_SHIFT;
                return;
        }

        for (i = 0; i < nr; i++, page++) {
                int mapcount = page_mapcount(page);
                unsigned long pss = (PAGE_SIZE << PSS_SHIFT);

                if (mapcount >= 2) {
                        if (dirty || PageDirty(page))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                        mss->pss += pss / mapcount;
                        if (locked)
                                mss->pss_locked += pss / mapcount;
                } else {
                        if (dirty || PageDirty(page))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                        mss->pss += pss;
                        if (locked)
                                mss->pss_locked += pss;
                }
        }
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
                struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;

        mss->swap += shmem_partial_swap_usage(
                        walk->vma->vm_file->f_mapping, addr, end);

        return 0;
}
#endif

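/*
 * Account whatever a single pte maps: a present page, a swap entry
 * (including its swap_pss share), or a swapped-out shmem page found via
 * the page cache when check_shmem_swap is set.
 */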
static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        bool locked = !!(vma->vm_flags & VM_LOCKED);
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (!non_swap_entry(swpent)) {
                        int mapcount;

                        mss->swap += PAGE_SIZE;
                        mapcount = swp_swapcount(swpent);
                        if (mapcount >= 2) {
                                u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

                                do_div(pss_delta, mapcount);
                                mss->swap_pss += pss_delta;
                        } else {
                                mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
                        }
                } else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
                else if (is_device_private_entry(swpent))
                        page = device_private_entry_to_page(swpent);
        } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
                                                        && pte_none(*pte))) {
                page = find_get_entry(vma->vm_file->f_mapping,
                                                linear_page_index(vma, addr));
                if (!page)
                        return;

                if (xa_is_value(page))
                        mss->swap += PAGE_SIZE;
                else
                        put_page(page);

                return;
        }

        if (!page)
                return;

        smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        bool locked = !!(vma->vm_flags & VM_LOCKED);
        struct page *page;

        /* FOLL_DUMP will return -EFAULT on huge zero page */
        page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
        if (IS_ERR_OR_NULL(page))
                return;
        if (PageAnon(page))
                mss->anonymous_thp += HPAGE_PMD_SIZE;
        else if (PageSwapBacked(page))
                mss->shmem_thp += HPAGE_PMD_SIZE;
        else if (is_zone_device_page(page))
                /* pass */;
        else
                VM_BUG_ON_PAGE(1, page);
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                struct mm_walk *walk)
{
}
#endif

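/*
 * pmd_entry callback for the smaps walk: account a huge pmd directly,
 * otherwise walk the pte table under its page-table lock.
 */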
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                           struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        pte_t *pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                if (pmd_present(*pmd))
                        smaps_pmd_entry(pmd, addr, walk);
                spin_unlock(ptl);
                goto out;
        }

        if (pmd_trans_unstable(pmd))
                goto out;
        /*
         * The mmap_sem held all the way back in m_start() is what
         * keeps khugepaged out of here and from collapsing things
         * in here.
         */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                smaps_pte_entry(pte, addr, walk);
        pte_unmap_unlock(pte - 1, ptl);
out:
        cond_resched();
        return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
        /*
         * Don't forget to update Documentation/ on changes.
         */
        static const char mnemonics[BITS_PER_LONG][2] = {
                /*
                 * In case we meet a flag we don't know about.
                 */
                [0 ... (BITS_PER_LONG-1)] = "??",

                [ilog2(VM_READ)]        = "rd",
                [ilog2(VM_WRITE)]       = "wr",
                [ilog2(VM_EXEC)]        = "ex",
                [ilog2(VM_SHARED)]      = "sh",
                [ilog2(VM_MAYREAD)]     = "mr",
                [ilog2(VM_MAYWRITE)]    = "mw",
                [ilog2(VM_MAYEXEC)]     = "me",
                [ilog2(VM_MAYSHARE)]    = "ms",
                [ilog2(VM_GROWSDOWN)]   = "gd",
                [ilog2(VM_PFNMAP)]      = "pf",
                [ilog2(VM_DENYWRITE)]   = "dw",
#ifdef CONFIG_X86_INTEL_MPX
                [ilog2(VM_MPX)]         = "mp",
#endif
                [ilog2(VM_LOCKED)]      = "lo",
                [ilog2(VM_IO)]          = "io",
                [ilog2(VM_SEQ_READ)]    = "sr",
                [ilog2(VM_RAND_READ)]   = "rr",
                [ilog2(VM_DONTCOPY)]    = "dc",
                [ilog2(VM_DONTEXPAND)]  = "de",
                [ilog2(VM_ACCOUNT)]     = "ac",
                [ilog2(VM_NORESERVE)]   = "nr",
                [ilog2(VM_HUGETLB)]     = "ht",
                [ilog2(VM_SYNC)]        = "sf",
                [ilog2(VM_ARCH_1)]      = "ar",
                [ilog2(VM_WIPEONFORK)]  = "wf",
                [ilog2(VM_DONTDUMP)]    = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
                [ilog2(VM_SOFTDIRTY)]   = "sd",
#endif
                [ilog2(VM_MIXEDMAP)]    = "mm",
                [ilog2(VM_HUGEPAGE)]    = "hg",
                [ilog2(VM_NOHUGEPAGE)]  = "nh",
                [ilog2(VM_MERGEABLE)]   = "mg",
                [ilog2(VM_UFFD_MISSING)]= "um",
                [ilog2(VM_UFFD_WP)]     = "uw",
#ifdef CONFIG_ARCH_HAS_PKEYS
                /* These come out via ProtectionKey: */
                [ilog2(VM_PKEY_BIT0)]   = "",
                [ilog2(VM_PKEY_BIT1)]   = "",
                [ilog2(VM_PKEY_BIT2)]   = "",
                [ilog2(VM_PKEY_BIT3)]   = "",
#if VM_PKEY_BIT4
                [ilog2(VM_PKEY_BIT4)]   = "",
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
        };
        size_t i;

        seq_puts(m, "VmFlags: ");
        for (i = 0; i < BITS_PER_LONG; i++) {
                if (!mnemonics[i][0])
                        continue;
                if (vma->vm_flags & (1UL << i)) {
                        seq_putc(m, mnemonics[i][0]);
                        seq_putc(m, mnemonics[i][1]);
                        seq_putc(m, ' ');
                }
        }
        seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        struct page *page = NULL;

        if (pte_present(*pte)) {
                page = vm_normal_page(vma, addr, *pte);
        } else if (is_swap_pte(*pte)) {
                swp_entry_t swpent = pte_to_swp_entry(*pte);

                if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
                else if (is_device_private_entry(swpent))
                        page = device_private_entry_to_page(swpent);
        }
        if (page) {
                int mapcount = page_mapcount(page);

                if (mapcount >= 2)
                        mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
                else
                        mss->private_hugetlb += huge_page_size(hstate_vma(vma));
        }
        return 0;
}
#endif /* HUGETLB_PAGE */

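/*
 * Walk one VMA and fill @mss. Shmem swap is either taken wholesale from
 * the object's counters or, for private writable mappings, discovered
 * hole-by-hole via smaps_pte_hole().
 */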
static void smap_gather_stats(struct vm_area_struct *vma,
                             struct mem_size_stats *mss)
{
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
                .hugetlb_entry = smaps_hugetlb_range,
#endif
                .mm = vma->vm_mm,
        };

        smaps_walk.private = mss;

#ifdef CONFIG_SHMEM
        /* In case of smaps_rollup, reset the value from previous vma */
        mss->check_shmem_swap = false;
        if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
                /*
                 * For shared or readonly shmem mappings we know that all
                 * swapped out pages belong to the shmem object, and we can
                 * obtain the swap value much more efficiently. For private
                 * writable mappings, we might have COW pages that are
                 * not affected by the parent swapped out pages of the shmem
                 * object, so we have to distinguish them during the page walk.
                 * Unless we know that the shmem object (or the part mapped by
                 * our VMA) has no swapped out pages at all.
                 */
                unsigned long shmem_swapped = shmem_swap_usage(vma);

                if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
                                        !(vma->vm_flags & VM_WRITE)) {
                        mss->swap += shmem_swapped;
                } else {
                        mss->check_shmem_swap = true;
                        smaps_walk.pte_hole = smaps_pte_hole;
                }
        }
#endif
        /* mmap_sem is held in m_start */
        walk_page_vma(vma, &smaps_walk);
}

#define SEQ_PUT_DEC(str, val) \
                seq_put_decimal_ull_width(m, str, (val) >> 10, 8)

/* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss)
{
        SEQ_PUT_DEC("Rss:            ", mss->resident);
        SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
        SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
        SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
        SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
        SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
        SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
        SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
        SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
        SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
        SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
        SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
        seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
                                  mss->private_hugetlb >> 10, 7);
        SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
        SEQ_PUT_DEC(" kB\nSwapPss:        ",
                                        mss->swap_pss >> PSS_SHIFT);
        SEQ_PUT_DEC(" kB\nLocked:         ",
                                        mss->pss_locked >> PSS_SHIFT);
        seq_puts(m, " kB\n");
}

static int show_smap(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;

        memset(&mss, 0, sizeof(mss));

        smap_gather_stats(vma, &mss);

        show_map_vma(m, vma);

        SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
        SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
        SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
        seq_puts(m, " kB\n");

        __show_smap(m, &mss);

        seq_printf(m, "THPeligible:    %d\n", transparent_hugepage_enabled(vma));

        if (arch_pkeys_enabled())
                seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
        show_smap_vma_flags(m, vma);

        m_cache_vma(m, vma);

        return 0;
}

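/*
 * /proc/pid/smaps_rollup: run smap_gather_stats() over every VMA and
 * print one [rollup] record covering the whole address space.
 */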
static int show_smaps_rollup(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct mem_size_stats mss;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long last_vma_end = 0;
        int ret = 0;

        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
                return -ESRCH;

        mm = priv->mm;
        if (!mm || !mmget_not_zero(mm)) {
                ret = -ESRCH;
                goto out_put_task;
        }

        memset(&mss, 0, sizeof(mss));

        down_read(&mm->mmap_sem);
        hold_task_mempolicy(priv);

        for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
                smap_gather_stats(vma, &mss);
                last_vma_end = vma->vm_end;
        }

        show_vma_header_prefix(m, priv->mm->mmap->vm_start,
                               last_vma_end, 0, 0, 0, 0);
        seq_pad(m, ' ');
        seq_puts(m, "[rollup]\n");

        __show_smap(m, &mss);

        release_task_mempolicy(priv);
        up_read(&mm->mmap_sem);
        mmput(mm);

out_put_task:
        put_task_struct(priv->task);
        priv->task = NULL;

        return ret;
}
#undef SEQ_PUT_DEC

static const struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int smaps_rollup_open(struct inode *inode, struct file *file)
{
        int ret;
        struct proc_maps_private *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
        if (!priv)
                return -ENOMEM;

        ret = single_open(file, show_smaps_rollup, priv);
        if (ret)
                goto out_free;

        priv->inode = inode;
        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                ret = PTR_ERR(priv->mm);

                single_release(inode, file);
                goto out_free;
        }

        return 0;

out_free:
        kfree(priv);
        return ret;
}

static int smaps_rollup_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        if (priv->mm)
                mmdrop(priv->mm);

        kfree(priv);
        return single_release(inode, file);
}

const struct file_operations proc_pid_smaps_operations = {
        .open           = pid_smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
        .open           = smaps_rollup_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = smaps_rollup_release,
};

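/* Commands accepted by writes to /proc/pid/clear_refs; see clear_refs_write(). */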
enum clear_refs_types {
        CLEAR_REFS_ALL = 1,
        CLEAR_REFS_ANON,
        CLEAR_REFS_MAPPED,
        CLEAR_REFS_SOFT_DIRTY,
        CLEAR_REFS_MM_HIWATER_RSS,
        CLEAR_REFS_LAST,
};

struct clear_refs_private {
        enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
{
        /*
         * The soft-dirty tracker uses #PF-s to catch writes
         * to pages, so write-protect the pte as well. See
         * Documentation/admin-guide/mm/soft-dirty.rst for a full
         * description of how soft-dirty works.
         */
        pte_t ptent = *pte;

        if (pte_present(ptent)) {
                pte_t old_pte;

                old_pte = ptep_modify_prot_start(vma, addr, pte);
                ptent = pte_wrprotect(old_pte);
                ptent = pte_clear_soft_dirty(ptent);
                ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
                set_pte_at(vma->vm_mm, addr, pte, ptent);
        }
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
{
        pmd_t old, pmd = *pmdp;

        if (pmd_present(pmd)) {
                /* See comment in change_huge_pmd() */
                old = pmdp_invalidate(vma, addr, pmdp);
                if (pmd_dirty(old))
                        pmd = pmd_mkdirty(pmd);
                if (pmd_young(old))
                        pmd = pmd_mkyoung(pmd);

                pmd = pmd_wrprotect(pmd);
                pmd = pmd_clear_soft_dirty(pmd);

                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
        } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
                pmd = pmd_swp_clear_soft_dirty(pmd);
                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
        }
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty_pmd(vma, addr, pmd);
                        goto out;
                }

                if (!pmd_present(*pmd))
                        goto out;

                page = pmd_page(*pmd);

                /* Clear accessed and referenced bits. */
                pmdp_test_and_clear_young(vma, addr, pmd);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);
out:
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;

                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty(vma, addr, pte);
                        continue;
                }

                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                test_and_clear_page_young(page);
                ClearPageReferenced(page);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct clear_refs_private *cp = walk->private;
        struct vm_area_struct *vma = walk->vma;

        if (vma->vm_flags & VM_PFNMAP)
                return 1;

        /*
         * Writing 1 to /proc/pid/clear_refs affects all pages.
         * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
         * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
         * Writing 4 to /proc/pid/clear_refs affects all pages.
         */
        if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
                return 1;
        if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
                return 1;
        return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        enum clear_refs_types type;
        struct mmu_gather tlb;
        int itype;
        int rv;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
        rv = kstrtoint(strstrip(buffer), 10, &itype);
        if (rv < 0)
                return rv;
        type = (enum clear_refs_types)itype;
        if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
                return -EINVAL;

        task = get_proc_task(file_inode(file));
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
                struct mmu_notifier_range range;
                struct clear_refs_private cp = {
                        .type = type,
                };
                struct mm_walk clear_refs_walk = {
                        .pmd_entry = clear_refs_pte_range,
                        .test_walk = clear_refs_test_walk,
                        .mm = mm,
                        .private = &cp,
                };

                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
                        if (down_write_killable(&mm->mmap_sem)) {
                                count = -EINTR;
                                goto out_mm;
                        }

                        /*
                         * Writing 5 to /proc/pid/clear_refs resets the peak
                         * resident set size to this mm's current rss value.
                         */
                        reset_mm_hiwater_rss(mm);
                        up_write(&mm->mmap_sem);
                        goto out_mm;
                }

                down_read(&mm->mmap_sem);
                tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
                                up_read(&mm->mmap_sem);
                                if (down_write_killable(&mm->mmap_sem)) {
                                        count = -EINTR;
                                        goto out_mm;
                                }
                                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                        vma->vm_flags &= ~VM_SOFTDIRTY;
                                        vma_set_page_prot(vma);
                                }
                                downgrade_write(&mm->mmap_sem);
                                break;
                        }

                        mmu_notifier_range_init(&range, mm, 0, -1UL);
                        mmu_notifier_invalidate_range_start(&range);
                }
                walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(&range);
                tlb_finish_mmu(&tlb, 0, -1);
                up_read(&mm->mmap_sem);
out_mm:
                mmput(mm);
        }
        put_task_struct(task);

        return count;
}

const struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
        .llseek         = noop_llseek,
};

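/*
 * Illustrative soft-dirty workflow from userspace (a sketch, not part of
 * this file): clear the bits, let the task run, then look for bit 55
 * (PM_SOFT_DIRTY) in the pagemap entries of interest:
 *
 *	echo 4 > /proc/$PID/clear_refs
 *	# ... let the workload run ...
 *	# read /proc/$PID/pagemap and test bit 55 of each entry
 */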
typedef struct {
        u64 pme;
} pagemap_entry_t;

struct pagemapread {
        int pos, len;           /* units: PM_ENTRY_BYTES, not bytes */
        pagemap_entry_t *buffer;
        bool show_pfn;
};

#define PAGEMAP_WALK_SIZE       (PMD_SIZE)
#define PAGEMAP_WALK_MASK       (PMD_MASK)

#define PM_ENTRY_BYTES          sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS          55
#define PM_PFRAME_MASK          GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY           BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE       BIT_ULL(56)
#define PM_FILE                 BIT_ULL(61)
#define PM_SWAP                 BIT_ULL(62)
#define PM_PRESENT              BIT_ULL(63)

#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
        return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
                          struct pagemapread *pm)
{
        pm->buffer[pm->pos++] = *pme;
        if (pm->pos >= pm->len)
                return PM_END_OF_BUFFER;
        return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                                struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        unsigned long addr = start;
        int err = 0;

        while (addr < end) {
                struct vm_area_struct *vma = find_vma(walk->mm, addr);
                pagemap_entry_t pme = make_pme(0, 0);
                /* End of address space hole, which we mark as non-present. */
                unsigned long hole_end;

                if (vma)
                        hole_end = min(end, vma->vm_start);
                else
                        hole_end = end;

                for (; addr < hole_end; addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }

                if (!vma)
                        break;

                /* Addresses in the VMA. */
                if (vma->vm_flags & VM_SOFTDIRTY)
                        pme = make_pme(0, PM_SOFT_DIRTY);
                for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                goto out;
                }
        }
out:
        return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
                struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        u64 frame = 0, flags = 0;
        struct page *page = NULL;

        if (pte_present(pte)) {
                if (pm->show_pfn)
                        frame = pte_pfn(pte);
                flags |= PM_PRESENT;
                page = _vm_normal_page(vma, addr, pte, true);
                if (pte_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
                swp_entry_t entry;
                if (pte_swp_soft_dirty(pte))
                        flags |= PM_SOFT_DIRTY;
                entry = pte_to_swp_entry(pte);
                if (pm->show_pfn)
                        frame = swp_type(entry) |
                                (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
                flags |= PM_SWAP;
                if (is_migration_entry(entry))
                        page = migration_entry_to_page(entry);

                if (is_device_private_entry(entry))
                        page = device_private_entry_to_page(entry);
        }

        if (page && !PageAnon(page))
                flags |= PM_FILE;
        if (page && page_mapcount(page) == 1)
                flags |= PM_MMAP_EXCLUSIVE;
        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct pagemapread *pm = walk->private;
        spinlock_t *ptl;
        pte_t *pte, *orig_pte;
        int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        ptl = pmd_trans_huge_lock(pmdp, vma);
        if (ptl) {
                u64 flags = 0, frame = 0;
                pmd_t pmd = *pmdp;
                struct page *page = NULL;

                if (vma->vm_flags & VM_SOFTDIRTY)
                        flags |= PM_SOFT_DIRTY;

                if (pmd_present(pmd)) {
                        page = pmd_page(pmd);

                        flags |= PM_PRESENT;
                        if (pmd_soft_dirty(pmd))
                                flags |= PM_SOFT_DIRTY;
                        if (pm->show_pfn)
                                frame = pmd_pfn(pmd) +
                                        ((addr & ~PMD_MASK) >> PAGE_SHIFT);
                }
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
                else if (is_swap_pmd(pmd)) {
                        swp_entry_t entry = pmd_to_swp_entry(pmd);
                        unsigned long offset;

                        if (pm->show_pfn) {
                                offset = swp_offset(entry) +
                                        ((addr & ~PMD_MASK) >> PAGE_SHIFT);
                                frame = swp_type(entry) |
                                        (offset << MAX_SWAPFILES_SHIFT);
                        }
                        flags |= PM_SWAP;
                        if (pmd_swp_soft_dirty(pmd))
                                flags |= PM_SOFT_DIRTY;
                        VM_BUG_ON(!is_pmd_migration_entry(pmd));
                        page = migration_entry_to_page(entry);
                }
#endif

                if (page && page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;

                for (; addr != end; addr += PAGE_SIZE) {
                        pagemap_entry_t pme = make_pme(frame, flags);

                        err = add_to_pagemap(addr, &pme, pm);
                        if (err)
                                break;
                        if (pm->show_pfn) {
                                if (flags & PM_PRESENT)
                                        frame++;
                                else if (flags & PM_SWAP)
                                        frame += (1 << MAX_SWAPFILES_SHIFT);
                        }
                }
                spin_unlock(ptl);
                return err;
        }

        if (pmd_trans_unstable(pmdp))
                return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

        /*
         * We can assume that @vma always points to a valid one and @end never
         * goes beyond vma->vm_end.
         */
        orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
        for (; addr < end; pte++, addr += PAGE_SIZE) {
                pagemap_entry_t pme;

                pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
                err = add_to_pagemap(addr, &pme, pm);
                if (err)
                        break;
        }
        pte_unmap_unlock(orig_pte, ptl);

        cond_resched();

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        struct vm_area_struct *vma = walk->vma;
        u64 flags = 0, frame = 0;
        int err = 0;
        pte_t pte;

        if (vma->vm_flags & VM_SOFTDIRTY)
                flags |= PM_SOFT_DIRTY;

        pte = huge_ptep_get(ptep);
        if (pte_present(pte)) {
                struct page *page = pte_page(pte);

                if (!PageAnon(page))
                        flags |= PM_FILE;

                if (page_mapcount(page) == 1)
                        flags |= PM_MMAP_EXCLUSIVE;

                flags |= PM_PRESENT;
                if (pm->show_pfn)
                        frame = pte_pfn(pte) +
                                ((addr & ~hmask) >> PAGE_SHIFT);
        }

        for (; addr != end; addr += PAGE_SIZE) {
                pagemap_entry_t pme = make_pme(frame, flags);

                err = add_to_pagemap(addr, &pme, pm);
                if (err)
                        return err;
                if (pm->show_pfn && (flags & PM_PRESENT))
                        frame++;
        }

        cond_resched();

        return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
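/*
 * Illustrative userspace reader (a sketch, not part of this file; the
 * helper name is hypothetical and error handling is omitted): fetch the
 * 64-bit entry describing one virtual address.
 *
 *	uint64_t pagemap_entry(int fd, uintptr_t vaddr)
 *	{
 *		uint64_t pme = 0;
 *		size_t pagesize = sysconf(_SC_PAGESIZE);
 *
 *		pread(fd, &pme, sizeof(pme),
 *		      (off_t)(vaddr / pagesize) * sizeof(pme));
 *		return pme;	// bit 63 = present, bits 0-54 = PFN
 *	}
 */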
1457 static ssize_t pagemap_read(struct file *file, char __user *buf,
1458                             size_t count, loff_t *ppos)
1459 {
1460         struct mm_struct *mm = file->private_data;
1461         struct pagemapread pm;
1462         struct mm_walk pagemap_walk = {};
1463         unsigned long src;
1464         unsigned long svpfn;
1465         unsigned long start_vaddr;
1466         unsigned long end_vaddr;
1467         int ret = 0, copied = 0;
1468
1469         if (!mm || !mmget_not_zero(mm))
1470                 goto out;
1471
1472         ret = -EINVAL;
1473         /* file position must be aligned */
1474         if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1475                 goto out_mm;
1476
1477         ret = 0;
1478         if (!count)
1479                 goto out_mm;
1480
1481         /* do not disclose physical addresses: attack vector */
1482         pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1483
1484         pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1485         pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1486         ret = -ENOMEM;
1487         if (!pm.buffer)
1488                 goto out_mm;
1489
1490         pagemap_walk.pmd_entry = pagemap_pmd_range;
1491         pagemap_walk.pte_hole = pagemap_pte_hole;
1492 #ifdef CONFIG_HUGETLB_PAGE
1493         pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1494 #endif
1495         pagemap_walk.mm = mm;
1496         pagemap_walk.private = &pm;
1497
1498         src = *ppos;
1499         svpfn = src / PM_ENTRY_BYTES;
1500         start_vaddr = svpfn << PAGE_SHIFT;
1501         end_vaddr = mm->task_size;
1502
1503         /* watch out for wraparound */
1504         if (svpfn > mm->task_size >> PAGE_SHIFT)
1505                 start_vaddr = end_vaddr;
1506
1507         /*
1508          * The odds are that this will stop walking way
1509          * before end_vaddr, because the length of the
1510          * user buffer is tracked in "pm", and the walk
1511          * will stop when we hit the end of the buffer.
1512          */
1513         ret = 0;
1514         while (count && (start_vaddr < end_vaddr)) {
1515                 int len;
1516                 unsigned long end;
1517
1518                 pm.pos = 0;
1519                 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1520                 /* overflow? clamp the chunk at end_vaddr */
1521                 if (end < start_vaddr || end > end_vaddr)
1522                         end = end_vaddr;
1523                 down_read(&mm->mmap_sem);
1524                 ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1525                 up_read(&mm->mmap_sem);
1526                 start_vaddr = end;
1527
1528                 len = min(count, PM_ENTRY_BYTES * pm.pos);
1529                 if (copy_to_user(buf, pm.buffer, len)) {
1530                         ret = -EFAULT;
1531                         goto out_free;
1532                 }
1533                 copied += len;
1534                 buf += len;
1535                 count -= len;
1536         }
1537         *ppos += copied;
1538         if (!ret || ret == PM_END_OF_BUFFER)
1539                 ret = copied;
1540
1541 out_free:
1542         kfree(pm.buffer);
1543 out_mm:
1544         mmput(mm);
1545 out:
1546         return ret;
1547 }
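/*
 * A minimal userspace sketch of the access pattern described above: one
 * 8-byte entry per virtual page, so the file offset for "vaddr" is
 * (vaddr / page_size) * 8, which also satisfies the PM_ENTRY_BYTES
 * alignment check in pagemap_read().  The helper name and error
 * handling are illustrative:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int read_pagemap_entry(pid_t pid, unsigned long vaddr,
 *				      uint64_t *ent)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		char path[64];
 *		ssize_t n;
 *		int fd;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return -1;
 *		n = pread(fd, ent, sizeof(*ent),
 *			  (off_t)(vaddr / psz) * sizeof(*ent));
 *		close(fd);
 *		return n == sizeof(*ent) ? 0 : -1;
 *	}
 *
 * Note that PFNs (bits 0-54) read back as zero unless the opener had
 * CAP_SYS_ADMIN; the flag bits are reported either way.
 */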
1548
/*
 * proc_mem_open() checks ptrace read access to the target and returns
 * its mm with only an mm_count reference held (mmgrab(); dropped via
 * mmdrop() in pagemap_release()).  The address space itself is pinned
 * per-read with mmget_not_zero() in pagemap_read().
 */
1549 static int pagemap_open(struct inode *inode, struct file *file)
1550 {
1551         struct mm_struct *mm;
1552
1553         mm = proc_mem_open(inode, PTRACE_MODE_READ);
1554         if (IS_ERR(mm))
1555                 return PTR_ERR(mm);
1556         file->private_data = mm;
1557         return 0;
1558 }
1559
1560 static int pagemap_release(struct inode *inode, struct file *file)
1561 {
1562         struct mm_struct *mm = file->private_data;
1563
1564         if (mm)
1565                 mmdrop(mm);
1566         return 0;
1567 }
1568
1569 const struct file_operations proc_pagemap_operations = {
1570         .llseek         = mem_lseek, /* borrow this */
1571         .read           = pagemap_read,
1572         .open           = pagemap_open,
1573         .release        = pagemap_release,
1574 };
1575 #endif /* CONFIG_PROC_PAGE_MONITOR */
1576
1577 #ifdef CONFIG_NUMA
1578
1579 struct numa_maps {
1580         unsigned long pages;
1581         unsigned long anon;
1582         unsigned long active;
1583         unsigned long writeback;
1584         unsigned long mapcount_max;
1585         unsigned long dirty;
1586         unsigned long swapcache;
1587         unsigned long node[MAX_NUMNODES];
1588 };
1589
1590 struct numa_maps_private {
1591         struct proc_maps_private proc_maps;
1592         struct numa_maps md;
1593 };
1594
1595 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1596                         unsigned long nr_pages)
1597 {
1598         int count = page_mapcount(page);
1599
1600         md->pages += nr_pages;
1601         if (pte_dirty || PageDirty(page))
1602                 md->dirty += nr_pages;
1603
1604         if (PageSwapCache(page))
1605                 md->swapcache += nr_pages;
1606
1607         if (PageActive(page) || PageUnevictable(page))
1608                 md->active += nr_pages;
1609
1610         if (PageWriteback(page))
1611                 md->writeback += nr_pages;
1612
1613         if (PageAnon(page))
1614                 md->anon += nr_pages;
1615
1616         if (count > md->mapcount_max)
1617                 md->mapcount_max = count;
1618
1619         md->node[page_to_nid(page)] += nr_pages;
1620 }
1621
1622 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1623                 unsigned long addr)
1624 {
1625         struct page *page;
1626         int nid;
1627
1628         if (!pte_present(pte))
1629                 return NULL;
1630
1631         page = vm_normal_page(vma, addr, pte);
1632         if (!page)
1633                 return NULL;
1634
1635         if (PageReserved(page))
1636                 return NULL;
1637
1638         nid = page_to_nid(page);
1639         if (!node_isset(nid, node_states[N_MEMORY]))
1640                 return NULL;
1641
1642         return page;
1643 }
1644
1645 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1646 static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1647                                               struct vm_area_struct *vma,
1648                                               unsigned long addr)
1649 {
1650         struct page *page;
1651         int nid;
1652
1653         if (!pmd_present(pmd))
1654                 return NULL;
1655
1656         page = vm_normal_page_pmd(vma, addr, pmd);
1657         if (!page)
1658                 return NULL;
1659
1660         if (PageReserved(page))
1661                 return NULL;
1662
1663         nid = page_to_nid(page);
1664         if (!node_isset(nid, node_states[N_MEMORY]))
1665                 return NULL;
1666
1667         return page;
1668 }
1669 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1670
1671 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1672                 unsigned long end, struct mm_walk *walk)
1673 {
1674         struct numa_maps *md = walk->private;
1675         struct vm_area_struct *vma = walk->vma;
1676         spinlock_t *ptl;
1677         pte_t *orig_pte;
1678         pte_t *pte;
1679
1680 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1681         ptl = pmd_trans_huge_lock(pmd, vma);
1682         if (ptl) {
1683                 struct page *page;
1684
1685                 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1686                 if (page)
1687                         gather_stats(page, md, pmd_dirty(*pmd),
1688                                      HPAGE_PMD_SIZE/PAGE_SIZE);
1689                 spin_unlock(ptl);
1690                 return 0;
1691         }
1692
1693         if (pmd_trans_unstable(pmd))
1694                 return 0;
1695 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1696         orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1697         do {
1698                 struct page *page = can_gather_numa_stats(*pte, vma, addr);
1699                 if (!page)
1700                         continue;
1701                 gather_stats(page, md, pte_dirty(*pte), 1);
1702
1703         } while (pte++, addr += PAGE_SIZE, addr != end);
1704         pte_unmap_unlock(orig_pte, ptl);
1705         cond_resched();
1706         return 0;
1707 }
1708 #ifdef CONFIG_HUGETLB_PAGE
1709 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1710                 unsigned long addr, unsigned long end, struct mm_walk *walk)
1711 {
1712         pte_t huge_pte = huge_ptep_get(pte);
1713         struct numa_maps *md;
1714         struct page *page;
1715
1716         if (!pte_present(huge_pte))
1717                 return 0;
1718
1719         page = pte_page(huge_pte);
1720         if (!page)
1721                 return 0;
1722
1723         md = walk->private;
1724         gather_stats(page, md, pte_dirty(huge_pte), 1);
1725         return 0;
1726 }
1727
1728 #else
1729 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1730                 unsigned long addr, unsigned long end, struct mm_walk *walk)
1731 {
1732         return 0;
1733 }
1734 #endif /* CONFIG_HUGETLB_PAGE */
1735
1736 /*
1737  * Display pages allocated per node and memory policy via /proc.
1738  */
1739 static int show_numa_map(struct seq_file *m, void *v)
1740 {
1741         struct numa_maps_private *numa_priv = m->private;
1742         struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1743         struct vm_area_struct *vma = v;
1744         struct numa_maps *md = &numa_priv->md;
1745         struct file *file = vma->vm_file;
1746         struct mm_struct *mm = vma->vm_mm;
1747         struct mm_walk walk = {
1748                 .hugetlb_entry = gather_hugetlb_stats,
1749                 .pmd_entry = gather_pte_stats,
1750                 .private = md,
1751                 .mm = mm,
1752         };
1753         struct mempolicy *pol;
1754         char buffer[64];
1755         int nid;
1756
1757         if (!mm)
1758                 return 0;
1759
1760         /* Ensure we start with an empty set of numa_maps statistics. */
1761         memset(md, 0, sizeof(*md));
1762
1763         pol = __get_vma_policy(vma, vma->vm_start);
1764         if (pol) {
1765                 mpol_to_str(buffer, sizeof(buffer), pol);
1766                 mpol_cond_put(pol);
1767         } else {
1768                 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1769         }
1770
1771         seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1772
1773         if (file) {
1774                 seq_puts(m, " file=");
1775                 seq_file_path(m, file, "\n\t= ");
1776         } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1777                 seq_puts(m, " heap");
1778         } else if (is_stack(vma)) {
1779                 seq_puts(m, " stack");
1780         }
1781
1782         if (is_vm_hugetlb_page(vma))
1783                 seq_puts(m, " huge");
1784
1785         /* mmap_sem is held by m_start */
1786         walk_page_vma(vma, &walk);
1787
1788         if (!md->pages)
1789                 goto out;
1790
1791         if (md->anon)
1792                 seq_printf(m, " anon=%lu", md->anon);
1793
1794         if (md->dirty)
1795                 seq_printf(m, " dirty=%lu", md->dirty);
1796
1797         if (md->pages != md->anon && md->pages != md->dirty)
1798                 seq_printf(m, " mapped=%lu", md->pages);
1799
1800         if (md->mapcount_max > 1)
1801                 seq_printf(m, " mapmax=%lu", md->mapcount_max);
1802
1803         if (md->swapcache)
1804                 seq_printf(m, " swapcache=%lu", md->swapcache);
1805
1806         if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1807                 seq_printf(m, " active=%lu", md->active);
1808
1809         if (md->writeback)
1810                 seq_printf(m, " writeback=%lu", md->writeback);
1811
1812         for_each_node_state(nid, N_MEMORY)
1813                 if (md->node[nid])
1814                         seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1815
1816         seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
1817 out:
1818         seq_putc(m, '\n');
1819         m_cache_vma(m, vma);
1820         return 0;
1821 }
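/*
 * Illustrative (not verbatim) output line from the above, assuming a
 * file-backed VMA spread across a two-node machine with the default
 * policy:
 *
 *	7f2b4c000000 default file=/usr/lib/libc.so.6 mapped=1234 mapmax=40 active=1000 N0=1000 N1=234 kernelpagesize_kB=4
 *
 * Counters that are zero (anon, dirty, swapcache, writeback) are simply
 * omitted from the line.
 */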
1822
1823 static const struct seq_operations proc_pid_numa_maps_op = {
1824         .start  = m_start,
1825         .next   = m_next,
1826         .stop   = m_stop,
1827         .show   = show_numa_map,
1828 };
1829
1830 static int pid_numa_maps_open(struct inode *inode, struct file *file)
1831 {
1832         return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
1833                                 sizeof(struct numa_maps_private));
1834 }
1835
1836 const struct file_operations proc_pid_numa_maps_operations = {
1837         .open           = pid_numa_maps_open,
1838         .read           = seq_read,
1839         .llseek         = seq_lseek,
1840         .release        = proc_map_release,
1841 };
1842
1843 #endif /* CONFIG_NUMA */