From 1e12cbb9f69541181afab6b1ff358b4f1dd3e253 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 8 Nov 2023 18:28:04 +0000
Subject: [PATCH] mm: make mapping_evict_folio() the preferred way to evict
 clean folios

Patch series "Fix fault handler's handling of poisoned tail pages".

Since introducing the ability to have large folios in the page cache,
it's been possible to have a hwpoisoned tail page returned from the
fault handler.  We handle this situation poorly; failing to remove the
affected page from use.

This isn't a minimal patch to fix it, it's a full conversion of all the
code surrounding it.

This patch (of 6):

invalidate_inode_page() does very little beyond calling
mapping_evict_folio().  Move the check for mapping being NULL into
mapping_evict_folio() and make it available to the rest of the MM for
use in the next few patches.

Link: https://lkml.kernel.org/r/20231108182809.602073-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20231108182809.602073-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: Naoya Horiguchi
Signed-off-by: Andrew Morton
---
 mm/internal.h |  1 +
 mm/truncate.c | 33 ++++++++++++++++-----------------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 89a5a794d68f..7d18094e102d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -138,6 +138,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
 		loff_t end);
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
 long invalidate_inode_page(struct page *page);
 unsigned long mapping_try_invalidate(struct address_space *mapping,
 		pgoff_t start, pgoff_t end, unsigned long *nr_failed);
diff --git a/mm/truncate.c b/mm/truncate.c
index 8e3aa9e8618e..1d516e51e29d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -266,9 +266,22 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page)
 }
 EXPORT_SYMBOL(generic_error_remove_page);
 
-static long mapping_evict_folio(struct address_space *mapping,
-		struct folio *folio)
+/**
+ * mapping_evict_folio() - Remove an unused folio from the page-cache.
+ * @mapping: The mapping this folio belongs to.
+ * @folio: The folio to remove.
+ *
+ * Safely remove one folio from the page cache.
+ * It only drops clean, unused folios.
+ *
+ * Context: Folio must be locked.
+ * Return: The number of pages successfully removed.
+ */
+long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
 {
+	/* The page may have been truncated before it was locked */
+	if (!mapping)
+		return 0;
 	if (folio_test_dirty(folio) || folio_test_writeback(folio))
 		return 0;
 	/* The refcount will be elevated if any page in the folio is mapped */
@@ -281,25 +294,11 @@ static long mapping_evict_folio(struct address_space *mapping,
 	return remove_mapping(mapping, folio);
 }
 
-/**
- * invalidate_inode_page() - Remove an unused page from the pagecache.
- * @page: The page to remove.
- *
- * Safely invalidate one page from its pagecache mapping.
- * It only drops clean, unused pages.
- *
- * Context: Page must be locked.
- * Return: The number of pages successfully removed.
- */
 long invalidate_inode_page(struct page *page)
 {
 	struct folio *folio = page_folio(page);
-	struct address_space *mapping = folio_mapping(folio);
 
-	/* The page may have been truncated before it was locked */
-	if (!mapping)
-		return 0;
-	return mapping_evict_folio(mapping, folio);
+	return mapping_evict_folio(folio_mapping(folio), folio);
 }
 
 /**
-- 
2.20.1
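
For reference, once mapping_evict_folio() is visible to the rest of the MM, a
caller could use it along the lines below.  This is only an illustrative
sketch and not part of the patch; try_evict_folio() is a made-up name.  It
shows the intended calling convention: lock the folio, pass folio_mapping()
straight in, and rely on mapping_evict_folio() to handle a NULL mapping from
a truncation race.

static long try_evict_folio(struct folio *folio)
{
	long ret = 0;

	/* mapping_evict_folio() requires the folio to be locked */
	if (folio_trylock(folio)) {
		/*
		 * folio_mapping() may return NULL if the folio was already
		 * truncated; mapping_evict_folio() now tolerates that.
		 */
		ret = mapping_evict_folio(folio_mapping(folio), folio);
		folio_unlock(folio);
	}
	return ret;
}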