rmap: replace two calls to compound_order with folio_order
author Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 15 Feb 2024 20:53:05 +0000 (20:53 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 23:27:20 +0000 (15:27 -0800)
Remove two unnecessary conversions from folio to page.  There should be
no difference in behaviour.
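
For context, a minimal sketch of the two helpers, paraphrased from the
v6.8-era include/linux/mm.h (field and flag names are from that era and
may differ in other releases):

  /* folio_order(): read the order straight from the folio. */
  static inline unsigned int folio_order(struct folio *folio)
  {
          if (!folio_test_large(folio))
                  return 0;
          return folio->_flags_1 & 0xff;
  }

  /* compound_order(): cast the page back to a folio, then do the same. */
  static inline unsigned int compound_order(struct page *page)
  {
          struct folio *folio = (struct folio *)page;

          if (!test_bit(PG_head, &folio->flags))
                  return 0;
          return folio->_flags_1 & 0xff;
  }

Since try_to_migrate_one() already holds a folio, writing
compound_order(&folio->page) converts the folio to a page only so that
compound_order() can convert it straight back; folio_order(folio) says
the same thing directly.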

Link: https://lkml.kernel.org/r/20240215205307.674707-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index 1cf2bff..3746a55 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2169,7 +2169,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
                        trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
-                                               compound_order(&folio->page));
+                                               folio_order(folio));
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.
@@ -2261,7 +2261,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        else
                                set_pte_at(mm, address, pvmw.pte, swp_pte);
                        trace_set_migration_pte(address, pte_val(swp_pte),
-                                               compound_order(&folio->page));
+                                               folio_order(folio));
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.