sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
- void (*freepage)(struct page *);
+ void (*free_folio)(struct folio *);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
bool (*isolate_page) (struct page *, isolate_mode_t);
int (*migratepage)(struct address_space *, struct page *, struct page *);
int (*swap_deactivate)(struct file *);
locking rules:
- All except dirty_folio and freepage may block
+ All except dirty_folio and free_folio may block
====================== ======================== ========= ===============
-ops                   PageLocked(page)         i_rwsem   invalidate_lock
+ops                   folio locked             i_rwsem   invalidate_lock
====================== ======================== ========= ===============
writepage:             yes, unlocks (see below)
read_folio:            yes, unlocks                       shared
bmap:
invalidate_folio:      yes                                exclusive
release_folio:         yes
-freepage:             yes
+free_folio:           yes
direct_IO:
isolate_page:          yes
migratepage:           yes (both)
indicate that the buffers are (or may be) freeable. If ->release_folio is
NULL, the kernel assumes that the fs has no private interest in the buffers.
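As a rough illustration of such a callback, consider a hypothetical filesystem ("myfs" and its per-folio bookkeeping structure are invented here, not taken from the patch) that attaches private data to folios. Per the locking table above, ->release_folio() is entered with the folio locked and, unlike ->free_folio(), it is allowed to block:

#include <linux/pagemap.h>
#include <linux/slab.h>

/* Invented per-folio bookkeeping; purely for illustration. */
struct myfs_private {
        bool busy;      /* e.g. still referenced by in-flight I/O */
};

/*
 * Entered with the folio locked.  Returning true tells the kernel the
 * private data has been released and the folio may be freed; returning
 * false keeps the folio because the data cannot be freed yet.  The gfp
 * argument describes the caller's allocation context (unused here).
 */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
        struct myfs_private *priv = folio_get_private(folio);

        if (!priv)
                return true;    /* nothing of ours attached */
        if (priv->busy)
                return false;   /* cannot free private data yet */

        folio_detach_private(folio);
        kfree(priv);
        return true;
}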
-->freepage() is called when the kernel is done dropping the page
+->free_folio() is called when the kernel has dropped the folio
from the page cache.
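A minimal sketch of the new callback for the same hypothetical "myfs" (again invented for illustration): by the time ->free_folio() runs, the folio has already been dropped from the page cache, the locking table above says it may not block, and, as the vfs.rst text later in this patch notes, it may be called from memory reclaim, so it must not assume the original mapping still exists:

/*
 * Sketch only.  The folio is no longer in the page cache; just release
 * whatever private data is still attached.  Must not block and must not
 * rely on folio->mapping.
 */
static void myfs_free_folio(struct folio *folio)
{
        kfree(folio_detach_private(folio));     /* kfree(NULL) is a no-op */
}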
->launder_folio() may be called prior to releasing a folio if
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t start, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
- void (*freepage)(struct page *);
+ void (*free_folio)(struct folio *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/* isolate a page for migration */
bool (*isolate_page) (struct page *, isolate_mode_t);
its release_folio will need to ensure this. Possibly it can
clear the uptodate flag if it cannot free private data yet.
-``freepage``
- freepage is called once the page is no longer visible in the
+``free_folio``
+ free_folio is called once the folio is no longer visible in the
page cache in order to allow the cleanup of any private data.
Since it may be called by the memory reclaimer, it should not
assume that the original address_space mapping still exists, and
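For an individual filesystem the conversion is then mostly a matter of wiring: the cleanup moves from a page-based ->freepage() to a folio-based ->free_folio() in its address_space_operations. A hedged sketch using the hypothetical callbacks above; the fs.h hunk below adds the new field next to the old one, and the mm/ hunks that follow call both operations during the transition, so filesystems can be converted one at a time:

static const struct address_space_operations myfs_aops = {
        /* ... read_folio, writepage and friends elided ... */
        .release_folio  = myfs_release_folio,
        .free_folio     = myfs_free_folio,
};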
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *folio);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
void (*freepage)(struct page *);
+ void (*free_folio)(struct folio *);
int refs = 1;
+ free_folio = mapping->a_ops->free_folio;
+ if (free_folio)
+ free_folio(folio);
freepage = mapping->a_ops->freepage;
if (freepage)
freepage(&folio->page);
struct folio *fold = page_folio(old);
struct folio *fnew = page_folio(new);
struct address_space *mapping = old->mapping;
+ void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
void (*freepage)(struct page *) = mapping->a_ops->freepage;
pgoff_t offset = old->index;
XA_STATE(xas, &mapping->i_pages, offset);
if (PageSwapBacked(new))
__inc_lruvec_page_state(new, NR_SHMEM);
xas_unlock_irq(&xas);
+ if (free_folio)
+ free_folio(fold);
if (freepage)
freepage(old);
- put_page(old);
+ folio_put(fold);
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
xa_unlock_irq(&mapping->i_pages);
put_swap_page(&folio->page, swap);
} else {
+ void (*free_folio)(struct folio *);
void (*freepage)(struct page *);
+ free_folio = mapping->a_ops->free_folio;
freepage = mapping->a_ops->freepage;
/*
* Remember a shadow entry for reclaimed file cache in
inode_add_lru(mapping->host);
spin_unlock(&mapping->host->i_lock);
- if (freepage != NULL)
+ if (free_folio)
+ free_folio(folio);
+ if (freepage)
freepage(&folio->page);
}