mm: migrate: remove unused mode argument
author    Keith Busch <keith.busch@intel.com>
          Thu, 18 Jul 2019 22:58:46 +0000 (15:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 19 Jul 2019 00:08:07 +0000 (17:08 -0700)
migrate_page_move_mapping() doesn't use the mode argument.  Remove it
and update callers accordingly.
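
For illustration only, a minimal sketch of a filesystem ->migratepage callback under the new signature (a hypothetical example_migratepage(), not a caller touched by this patch); the callback still receives enum migrate_mode from the migration core, it simply no longer forwards it:

#include <linux/fs.h>
#include <linux/migrate.h>

/*
 * Hypothetical ->migratepage implementation showing the updated call:
 * migrate_page_move_mapping() now takes only the mapping, the two
 * pages, and the extra reference count.
 */
static int example_migratepage(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	int rc;

	/* Swap the old page for the new one in the mapping's XArray. */
	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/* Transfer page contents and state to the replacement page. */
	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}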

Link: http://lkml.kernel.org/r/20190508210301.8472-1-keith.busch@intel.com
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/aio.c
fs/f2fs/data.c
fs/iomap.c
fs/ubifs/file.c
include/linux/migrate.h
mm/migrate.c

index 8327db0..8b3aa27 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -425,7 +425,7 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
        BUG_ON(PageWriteback(old));
        get_page(new);
 
-       rc = migrate_page_move_mapping(mapping, new, old, mode, 1);
+       rc = migrate_page_move_mapping(mapping, new, old, 1);
        if (rc != MIGRATEPAGE_SUCCESS) {
                put_page(new);
                goto out_unlock;
index 4eb2f39..abbf14e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2919,7 +2919,7 @@ int f2fs_migrate_page(struct address_space *mapping,
        /* one extra reference was held for atomic_write page */
        extra_count = atomic_written ? 1 : 0;
        rc = migrate_page_move_mapping(mapping, newpage,
-                               page, mode, extra_count);
+                               page, extra_count);
        if (rc != MIGRATEPAGE_SUCCESS) {
                if (atomic_written)
                        mutex_unlock(&fi->inmem_lock);
index 217c3e5..3e7f16a 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -566,7 +566,7 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
 {
        int ret;
 
-       ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+       ret = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (ret != MIGRATEPAGE_SUCCESS)
                return ret;
 
index e5f8de6..400970d 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1470,7 +1470,7 @@ static int ubifs_migrate_page(struct address_space *mapping,
 {
        int rc;
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+       rc = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
 
index e13d9bf..7f04754 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode,
-               int extra_count);
+               struct page *newpage, struct page *page, int extra_count);
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
index 3445747..8992741 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -394,8 +394,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 int migrate_page_move_mapping(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode,
-               int extra_count)
+               struct page *newpage, struct page *page, int extra_count)
 {
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        struct zone *oldzone, *newzone;
@@ -681,7 +680,7 @@ int migrate_page(struct address_space *mapping,
 
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+       rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
@@ -780,7 +779,7 @@ recheck_buffers:
                }
        }
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
+       rc = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                goto unlock_buffers;