hostfs: Handle page write errors correctly
[linux-2.6-microblaze.git] / mm/shmem.c
index a6f5653..28a62be 100644
@@ -392,7 +392,7 @@ void shmem_uncharge(struct inode *inode, long pages)
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;
 
-       /* nrpages adjustment done by __delete_from_page_cache() or caller */
+       /* nrpages adjustment done by __filemap_remove_folio() or caller */
 
        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
@@ -693,7 +693,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+ * Like filemap_add_folio, but error if expected item has gone.
  */
 static int shmem_add_to_page_cache(struct folio *folio,
                                   struct address_space *mapping,
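
For context, a rough sketch of how filemap_add_folio() is typically called; shmem_add_to_page_cache() additionally fails if the slot no longer holds the expected entry. The helper name example_add_folio(), the gfp choice and the ERR_PTR error convention below are illustrative assumptions, not part of this patch:

/*
 * Illustrative only, not shmem code: allocate a folio and insert it in
 * the page cache at @index with filemap_add_folio().  On success the
 * folio comes back locked and on the LRU; on failure the caller still
 * owns the reference and must drop it.
 */
static struct folio *example_add_folio(struct address_space *mapping,
				       pgoff_t index, gfp_t gfp)
{
	struct folio *folio;
	int err;

	folio = filemap_alloc_folio(gfp, 0);
	if (!folio)
		return ERR_PTR(-ENOMEM);

	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return ERR_PTR(err);
	}
	return folio;
}
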
@@ -867,18 +867,17 @@ unsigned long shmem_swap_usage(struct vm_area_struct *vma)
  */
 void shmem_unlock_mapping(struct address_space *mapping)
 {
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        pgoff_t index = 0;
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
-       while (!mapping_unevictable(mapping)) {
-               if (!pagevec_lookup(&pvec, mapping, &index))
-                       break;
-               check_move_unevictable_pages(&pvec);
-               pagevec_release(&pvec);
+       while (!mapping_unevictable(mapping) &&
+              filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
+               check_move_unevictable_folios(&fbatch);
+               folio_batch_release(&fbatch);
                cond_resched();
        }
 }
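
As a rough sketch of the generic folio_batch lookup loop the new shmem_unlock_mapping() follows; process_folio() and example_walk_mapping() are hypothetical names used only for illustration:

/*
 * Rough sketch of the batched folio walk used above; not part of this
 * patch.  process_folio() stands in for whatever per-folio work a
 * caller wants to do.
 */
static void example_walk_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	/* filemap_get_folios() advances @index past the folios it returns */
	while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			process_folio(fbatch.folios[i]);
		/* drop the references taken by the lookup */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

In the shmem case the per-folio loop is not needed: check_move_unevictable_folios() takes the whole batch and iterates it internally, so shmem_unlock_mapping() only keeps the lookup, release and cond_resched() parts of this pattern.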