From: Matthew Wilcox (Oracle)
Date: Wed, 14 May 2025 18:15:07 +0000 (+0100)
Subject: mm: rename page->index to page->__folio_index
X-Git-Url: http://git.monstr.eu/?a=commitdiff_plain;h=acc53a0b4c156877773da6e9eea4113dc7e770ae;p=linux-2.6-microblaze.git

mm: rename page->index to page->__folio_index

All users of page->index have been converted to not refer to it any more.
Update a few pieces of documentation that were missed and prevent new
users from appearing (or at least make them easy to grep for).

Link: https://lkml.kernel.org/r/20250514181508.3019795-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: David Hildenbrand
Signed-off-by: Andrew Morton
---

diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0749cf8a6637..5219158d54cf 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -227,9 +227,9 @@ void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
 	}
 
 	/*
-	 * If this is a page cache page, and we have an aliasing VIPT cache,
+	 * If this is a page cache folio, and we have an aliasing VIPT cache,
 	 * we only need to do one flush - which would be at the relevant
-	 * userspace colour, which is congruent with page->index.
+	 * userspace colour, which is congruent with folio->index.
 	 */
 	if (mapping && cache_is_vipt_aliasing())
 		flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
diff --git a/include/linux/mm.h b/include/linux/mm.h
index cd2e513189d6..5009c53ff1fe 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1276,9 +1276,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf);
  * the page's disk buffers. PG_private must be set to tell the VM to call
  * into the filesystem to release these pages.
  *
- * A page may belong to an inode's memory mapping. In this case, page->mapping
- * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_SIZE.
+ * A folio may belong to an inode's memory mapping. In this case,
+ * folio->mapping points to the inode, and folio->index is the file
+ * offset of the folio, in units of PAGE_SIZE.
  *
  * If pagecache pages are not associated with an inode, they are said to be
  * anonymous pages. These may become associated with the swapcache, and in that
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3e934dc6057c..17e0dcb87aae 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -107,7 +107,7 @@ struct page {
 			/* See page-flags.h for PAGE_MAPPING_FLAGS */
 			struct address_space *mapping;
 			union {
-				pgoff_t index;		/* Our offset within mapping. */
+				pgoff_t __folio_index;	/* Our offset within mapping. */
 				unsigned long share;	/* share count for fsdax */
 			};
 			/**
@@ -488,7 +488,7 @@ FOLIO_MATCH(flags, flags);
 FOLIO_MATCH(lru, lru);
 FOLIO_MATCH(mapping, mapping);
 FOLIO_MATCH(compound_head, lru);
-FOLIO_MATCH(index, index);
+FOLIO_MATCH(__folio_index, index);
 FOLIO_MATCH(private, private);
 FOLIO_MATCH(_mapcount, _mapcount);
 FOLIO_MATCH(_refcount, _refcount);
@@ -589,7 +589,7 @@ TABLE_MATCH(flags, __page_flags);
 TABLE_MATCH(compound_head, pt_list);
 TABLE_MATCH(compound_head, _pt_pad_1);
 TABLE_MATCH(mapping, __page_mapping);
-TABLE_MATCH(index, pt_index);
+TABLE_MATCH(__folio_index, pt_index);
 TABLE_MATCH(rcu_head, pt_rcu_head);
 TABLE_MATCH(page_type, __page_type);
 TABLE_MATCH(_refcount, __page_refcount);
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index cca15859a50b..ecd74370e216 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -206,7 +206,7 @@ static u64 get_inode_sequence_number(struct inode *inode)
  *
  * For shared mappings (when @fshared), the key is:
  *
- *   ( inode->i_sequence, page->index, offset_within_page )
+ *   ( inode->i_sequence, page offset within mapping, offset_within_page )
  *
  * [ also see get_inode_sequence_number() ]
  *
diff --git a/mm/filemap.c b/mm/filemap.c
index 09d005848f0d..bc244a2edc93 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -142,7 +142,7 @@ static void page_cache_delete(struct address_space *mapping,
 	xas_init_marks(&xas);
 
 	folio->mapping = NULL;
-	/* Leave page->index set: truncation lookup relies upon it */
+	/* Leave folio->index set: truncation lookup relies upon it */
 	mapping->nrpages -= nr;
 }
@@ -949,7 +949,7 @@ unlock:
 	return 0;
 error:
 	folio->mapping = NULL;
-	/* Leave page->index set: truncation relies upon it */
+	/* Leave folio->index set: truncation relies upon it */
 	folio_put_refs(folio, nr);
 	return xas_error(&xas);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 5cb48f262ab0..37d8738f5e12 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4668,8 +4668,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 		/*
 		 * KSM sometimes has to copy on read faults, for example, if
-		 * page->index of !PageKSM() pages would be nonlinear inside the
-		 * anon VMA -- PageKSM() is lost on actual swapout.
+		 * folio->index of non-ksm folios would be nonlinear inside the
+		 * anon VMA -- the ksm flag is lost on actual swapout.
 		 */
 		folio = ksm_might_need_to_copy(folio, vma, vmf->address);
 		if (unlikely(!folio)) {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 9ff44b64d3d6..e8641aa1043e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2565,11 +2565,11 @@ struct folio *writeback_iter(struct address_space *mapping,
 	if (!folio) {
 		/*
 		 * To avoid deadlocks between range_cyclic writeback and callers
-		 * that hold pages in PageWriteback to aggregate I/O until
+		 * that hold folios in writeback to aggregate I/O until
 		 * the writeback iteration finishes, we do not loop back to the
-		 * start of the file. Doing so causes a page lock/page
+		 * start of the file. Doing so causes a folio lock/folio
 		 * writeback access order inversion - we should only ever lock
-		 * multiple pages in ascending page->index order, and looping
+		 * multiple folios in ascending folio->index order, and looping
 		 * back to the start of the file violates that rule and causes
 		 * deadlocks.
 		 */
diff --git a/mm/truncate.c b/mm/truncate.c
index 5d98054094d1..3c1bcdc3a3e9 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -421,7 +421,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
-			/* We rely upon deletion not changing page->index */
+			/* We rely upon deletion not changing folio->index */
 			if (xa_is_value(folio))
 				continue;
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index 57e7a4d6c6ca..d3df316e5bb7 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -54,8 +54,8 @@ struct zpdesc {
 ZPDESC_MATCH(flags, flags);
 ZPDESC_MATCH(lru, lru);
 ZPDESC_MATCH(mapping, movable_ops);
-ZPDESC_MATCH(index, next);
-ZPDESC_MATCH(index, handle);
+ZPDESC_MATCH(__folio_index, next);
+ZPDESC_MATCH(__folio_index, handle);
 ZPDESC_MATCH(private, zspage);
 ZPDESC_MATCH(page_type, first_obj_offset);
 ZPDESC_MATCH(_refcount, _refcount);
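
The FOLIO_MATCH()/TABLE_MATCH()/ZPDESC_MATCH() lines updated above are compile-time
layout assertions: they pin the renamed struct page member to the same offset as the
corresponding folio (or ptdesc/zpdesc) field, so the rename cannot silently change the
layout. As a rough, self-contained sketch of that pattern only (the struct and macro
names below are made up for illustration and are not the kernel's), it boils down to
a static_assert() over offsetof():

	/*
	 * Minimal sketch of the *_MATCH() idea: two structs that overlay the
	 * same memory must keep corresponding fields at identical offsets,
	 * checked at compile time.  Names here are illustrative only.
	 */
	#include <assert.h>
	#include <stddef.h>

	struct demo_page {
		unsigned long flags;
		void *mapping;
		unsigned long __folio_index;	/* deliberately awkward "private" name */
	};

	struct demo_folio {
		unsigned long flags;
		void *mapping;
		unsigned long index;		/* public name stays on the folio side */
	};

	#define DEMO_MATCH(pg, fl)						\
		static_assert(offsetof(struct demo_page, pg) ==			\
			      offsetof(struct demo_folio, fl),			\
			      "demo_page/demo_folio layout mismatch")

	DEMO_MATCH(flags, flags);
	DEMO_MATCH(mapping, mapping);
	DEMO_MATCH(__folio_index, index);

	int main(void)
	{
		return 0;
	}

Built with -std=c11 or later, the sketch compiles only while the two structs stay in
sync; moving or renaming a field on one side without the other becomes a build error.
In the same spirit, code that still dereferences page->index no longer compiles after
this patch, and any new reference to __folio_index is easy to grep for.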