/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;
/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
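
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * end-of-writeback handler recording a failed write so that a later
 * fsync(2) on the file reports the error. The helper name is made up;
 * only mapping_set_error() and end_page_writeback() are real.
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		struct address_space *mapping = page_mapping(page);
 *
 *		if (err)
 *			mapping_set_error(mapping, err);
 *		end_page_writeback(page);
 *	}
 */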
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
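
/*
 * Illustrative use (assumed caller context): restrict a GFP_KERNEL
 * allocation to whatever the mapping permits, e.g. a mapping that
 * forbids __GFP_FS:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *	struct page *page = __page_cache_alloc(gfp);
 */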
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);
/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 *
 * An illustrative sketch of the lookup-side pattern follows
 * page_cache_add_speculative() below.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);
#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}
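
/*
 * Illustrative sketch of the lookup-side pattern described above. This is
 * a simplified, hypothetical helper; the real lookup lives in mm/filemap.c
 * and uses XArray iteration state rather than plain xa_load().
 *
 *	static struct page *example_lockless_lookup(struct address_space *mapping,
 *						    pgoff_t index)
 *	{
 *		struct page *page;
 *
 *		rcu_read_lock();
 *	repeat:
 *		page = xa_load(&mapping->i_pages, index);	// 1. find page
 *		if (!page || xa_is_value(page))
 *			goto out;
 *		if (!page_cache_get_speculative(page))		// 2. conditional get
 *			goto repeat;
 *		if (unlikely(page != xa_load(&mapping->i_pages, index))) {
 *			put_page(page);				// 3. recheck failed
 *			goto repeat;
 *		}
 *	out:
 *		rcu_read_unlock();
 *		return page;
 *	}
 */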
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}
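
/*
 * Illustrative use (assumed caller context): a typical caller takes the
 * reference, uses the page, and drops it with put_page() when done.
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		... read from the page ...
 *		put_page(page);
 *	}
 */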
static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
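
/*
 * Illustrative use (assumed caller context): grab or create a locked page,
 * modify it, then release it.
 *
 *	struct page *page = find_or_create_page(mapping, index, GFP_KERNEL);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	... modify the page contents ...
 *	set_page_dirty(page);
 *	unlock_page(page);
 *	put_page(page);
 */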
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}
/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (hpage_nr_pages(head) - 1));
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}
struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
			struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}
/*
 * Get index of the page within the radix tree.
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}
/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
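
/*
 * Illustrative use: the page-cache index backing a faulting user address,
 * e.g. inside a fault handler that has a struct vm_fault *vmf:
 *
 *	pgoff_t pgoff = linear_page_index(vmf->vma, vmf->address);
 */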
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}
static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);
void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
/*
 * Fault everything in given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}
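
/*
 * Illustrative use (assumed caller context): pre-fault a user buffer before
 * entering a section where taking page faults would deadlock or be otherwise
 * inconvenient. The later copy must still handle a fault, since the pages
 * may be reclaimed again in between.
 *
 *	if (fault_in_pages_writeable(user_buf, len))
 *		return -EFAULT;
 *	... copy_to_user(user_buf, kbuf, len), retrying on failure ...
 */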
static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;
	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);
/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}
/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}
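
/*
 * Illustrative sketch of a hypothetical ->page_mkwrite handler using the
 * helper above (the function name is made up; the vm_fault fields and
 * return codes are real):
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		lock_page(page);
 *		ret = page_mkwrite_check_truncate(page, inode);
 *		if (ret < 0) {
 *			unlock_page(page);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		... mark the first ret bytes of the page writable/dirty ...
 *		return VM_FAULT_LOCKED;
 *	}
 */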
#endif /* _LINUX_PAGEMAP_H */