[linux-2.6-microblaze.git] mm/shmem.c
index e28d259..8874295 100644
@@ -38,8 +38,7 @@
 #include <linux/hugetlb.h>
 #include <linux/frontswap.h>
 #include <linux/fs_parser.h>
-
-#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
+#include <linux/swapfile.h>
 
 static struct vfsmount *shm_mnt;
 
@@ -96,7 +95,7 @@ static struct vfsmount *shm_mnt;
 
 /*
  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
- * inode->i_private (with i_mutex making sure that it has only one user at
+ * inode->i_private (with i_rwsem making sure that it has only one user at
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
@@ -137,9 +136,6 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
-static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
-static int shmem_replace_page(struct page **pagep, gfp_t gfp,
-                               struct shmem_inode_info *info, pgoff_t index);
 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
                             struct page **pagep, enum sgp_type sgp,
                             gfp_t gfp, struct vm_area_struct *vma,
@@ -475,7 +471,38 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* ifdef here to avoid bloating shmem.o when not necessary */
 
-static int shmem_huge __read_mostly;
+static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+
+bool shmem_is_huge(struct vm_area_struct *vma,
+                  struct inode *inode, pgoff_t index)
+{
+       loff_t i_size;
+
+       if (shmem_huge == SHMEM_HUGE_DENY)
+               return false;
+       if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
+           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
+               return false;
+       if (shmem_huge == SHMEM_HUGE_FORCE)
+               return true;
+
+       switch (SHMEM_SB(inode->i_sb)->huge) {
+       case SHMEM_HUGE_ALWAYS:
+               return true;
+       case SHMEM_HUGE_WITHIN_SIZE:
+               index = round_up(index, HPAGE_PMD_NR);
+               i_size = round_up(i_size_read(inode), PAGE_SIZE);
+               if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+                       return true;
+               fallthrough;
+       case SHMEM_HUGE_ADVISE:
+               if (vma && (vma->vm_flags & VM_HUGEPAGE))
+                       return true;
+               fallthrough;
+       default:
+               return false;
+       }
+}
 
 #if defined(CONFIG_SYSFS)
 static int shmem_parse_huge(const char *str)
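
Note: the SHMEM_HUGE_ADVISE case in shmem_is_huge() above is driven by VM_HUGEPAGE on the mapping. A minimal userspace sketch of one way to reach it, assuming /sys/kernel/mm/transparent_hugepage/shmem_enabled is set to "advise" on a CONFIG_TRANSPARENT_HUGEPAGE kernel (the memfd name and sizes are illustrative only, not part of this patch):

    #define _GNU_SOURCE
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 4UL << 20;                 /* 4MB: spans at least one PMD-sized extent */
            int fd = memfd_create("thp-advise-demo", 0);    /* backed by the internal tmpfs mount */
            char *p;

            if (fd < 0 || ftruncate(fd, len) < 0)
                    return 1;
            p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED)
                    return 1;
            madvise(p, len, MADV_HUGEPAGE);         /* sets VM_HUGEPAGE: the SHMEM_HUGE_ADVISE case */
            memset(p, 1, len);                      /* faults may now be satisfied with huge pages */
            return 0;
    }
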
@@ -646,6 +673,12 @@ static long shmem_unused_huge_count(struct super_block *sb,
 
 #define shmem_huge SHMEM_HUGE_DENY
 
+bool shmem_is_huge(struct vm_area_struct *vma,
+                  struct inode *inode, pgoff_t index)
+{
+       return false;
+}
+
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
 {
@@ -653,15 +686,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
-{
-       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-           (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
-           shmem_huge != SHMEM_HUGE_DENY)
-               return true;
-       return false;
-}
-
 /*
  * Like add_to_page_cache_locked, but error if expected item has gone.
  */
@@ -775,7 +799,7 @@ static int shmem_free_swap(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given offsets are swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_partial_swap_usage(struct address_space *mapping,
@@ -807,7 +831,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
  * Determine (in bytes) how many of the shmem object's pages mapped by the
  * given vma is swapped out.
  *
- * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
+ * This is safe to call without i_rwsem or the i_pages lock thanks to RCU,
  * as long as the inode doesn't go away and racy results are not a problem.
  */
 unsigned long shmem_swap_usage(struct vm_area_struct *vma)
@@ -906,6 +930,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
        if (lend == -1)
                end = -1;       /* unsigned, so actually very big */
 
+       if (info->fallocend > start && info->fallocend <= end && !unfalloc)
+               info->fallocend = start;
+
        pagevec_init(&pvec);
        index = start;
        while (index < end && find_lock_entries(mapping, index, end - 1,
@@ -1039,7 +1066,6 @@ static int shmem_getattr(struct user_namespace *mnt_userns,
 {
        struct inode *inode = path->dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
 
        if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
                spin_lock_irq(&info->lock);
@@ -1048,7 +1074,7 @@ static int shmem_getattr(struct user_namespace *mnt_userns,
        }
        generic_fillattr(&init_user_ns, inode, stat);
 
-       if (is_huge_enabled(sb_info))
+       if (shmem_is_huge(NULL, inode, 0))
                stat->blksize = HPAGE_PMD_SIZE;
 
        return 0;
@@ -1059,7 +1085,6 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
 {
        struct inode *inode = d_inode(dentry);
        struct shmem_inode_info *info = SHMEM_I(inode);
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        int error;
 
        error = setattr_prepare(&init_user_ns, dentry, attr);
@@ -1070,7 +1095,7 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
@@ -1095,24 +1120,6 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);
-
-                       /*
-                        * Part of the huge page can be beyond i_size: subject
-                        * to shrink under memory pressure.
-                        */
-                       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
-                               spin_lock(&sbinfo->shrinklist_lock);
-                               /*
-                                * _careful to defend against unlocked access to
-                                * ->shrink_list in shmem_unused_huge_shrink()
-                                */
-                               if (list_empty_careful(&info->shrinklist)) {
-                                       list_add_tail(&info->shrinklist,
-                                                       &sbinfo->shrinklist);
-                                       sbinfo->shrinklist_len++;
-                               }
-                               spin_unlock(&sbinfo->shrinklist_lock);
-                       }
                }
        }
 
@@ -1157,8 +1164,6 @@ static void shmem_evict_inode(struct inode *inode)
        clear_inode(inode);
 }
 
-extern struct swap_info_struct *swap_info[];
-
 static int shmem_find_swap_entries(struct address_space *mapping,
                                   pgoff_t start, unsigned int nr_entries,
                                   struct page **entries, pgoff_t *indices,
@@ -1339,7 +1344,19 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        swp_entry_t swap;
        pgoff_t index;
 
-       VM_BUG_ON_PAGE(PageCompound(page), page);
+       /*
+        * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or
+        * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
+        * and its shmem_writeback() needs them to be split when swapping.
+        */
+       if (PageTransCompound(page)) {
+               /* Ensure the subpages are still dirty */
+               SetPageDirty(page);
+               if (split_huge_page(page) < 0)
+                       goto redirty;
+               ClearPageDirty(page);
+       }
+
        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
@@ -1799,7 +1816,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
        struct shmem_sb_info *sbinfo;
        struct mm_struct *charge_mm;
        struct page *page;
-       enum sgp_type sgp_huge = sgp;
        pgoff_t hindex = index;
        gfp_t huge_gfp;
        int error;
@@ -1808,8 +1824,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 
        if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
                return -EFBIG;
-       if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
-               sgp = SGP_CACHE;
 repeat:
        if (sgp <= SGP_CACHE &&
            ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
@@ -1841,26 +1855,31 @@ repeat:
                return error;
        }
 
-       if (page)
+       if (page) {
                hindex = page->index;
-       if (page && sgp == SGP_WRITE)
-               mark_page_accessed(page);
-
-       /* fallocated page? */
-       if (page && !PageUptodate(page)) {
+               if (sgp == SGP_WRITE)
+                       mark_page_accessed(page);
+               if (PageUptodate(page))
+                       goto out;
+               /* fallocated page */
                if (sgp != SGP_READ)
                        goto clear;
                unlock_page(page);
                put_page(page);
-               page = NULL;
-               hindex = index;
        }
-       if (page || sgp == SGP_READ)
-               goto out;
 
        /*
-        * Fast cache lookup did not find it:
-        * bring it back from swap or allocate.
+        * SGP_READ: succeed on hole, with NULL page, letting caller zero.
+        * SGP_NOALLOC: fail on hole, with NULL page, letting caller fail.
+        */
+       *pagep = NULL;
+       if (sgp == SGP_READ)
+               return 0;
+       if (sgp == SGP_NOALLOC)
+               return -ENOENT;
+
+       /*
+        * Fast cache lookup and swap lookup did not find it: allocate.
         */
 
        if (vma && userfaultfd_missing(vma)) {
@@ -1868,36 +1887,12 @@ repeat:
                return 0;
        }
 
-       /* shmem_symlink() */
-       if (!shmem_mapping(mapping))
-               goto alloc_nohuge;
-       if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
+       /* Never use a huge page for shmem_symlink() */
+       if (S_ISLNK(inode->i_mode))
                goto alloc_nohuge;
-       if (shmem_huge == SHMEM_HUGE_FORCE)
-               goto alloc_huge;
-       switch (sbinfo->huge) {
-       case SHMEM_HUGE_NEVER:
+       if (!shmem_is_huge(vma, inode, index))
                goto alloc_nohuge;
-       case SHMEM_HUGE_WITHIN_SIZE: {
-               loff_t i_size;
-               pgoff_t off;
 
-               off = round_up(index, HPAGE_PMD_NR);
-               i_size = round_up(i_size_read(inode), PAGE_SIZE);
-               if (i_size >= HPAGE_PMD_SIZE &&
-                   i_size >> PAGE_SHIFT >= off)
-                       goto alloc_huge;
-
-               fallthrough;
-       }
-       case SHMEM_HUGE_ADVISE:
-               if (sgp_huge == SGP_HUGE)
-                       goto alloc_huge;
-               /* TODO: implement fadvise() hints */
-               goto alloc_nohuge;
-       }
-
-alloc_huge:
        huge_gfp = vma_thp_gfp_mask(vma);
        huge_gfp = limit_gfp_mask(huge_gfp, gfp);
        page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
@@ -2053,14 +2048,13 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = file_inode(vma->vm_file);
        gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
-       enum sgp_type sgp;
        int err;
        vm_fault_t ret = VM_FAULT_LOCKED;
 
        /*
         * Trinity finds that probing a hole which tmpfs is punching can
         * prevent the hole-punch from ever completing: which in turn
-        * locks writers out with its hold on i_mutex.  So refrain from
+        * locks writers out with its hold on i_rwsem.  So refrain from
         * faulting pages into the hole while it's being punched.  Although
         * shmem_undo_range() does remove the additions, it may be unable to
         * keep up, as each new page needs its own unmap_mapping_range() call,
@@ -2071,7 +2065,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
         * we just need to make racing faults a rare case.
         *
         * The implementation below would be much simpler if we just used a
-        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * standard mutex or completion: but we cannot take i_rwsem in fault,
         * and bloating every shmem inode for this unlikely case would be sad.
         */
        if (unlikely(inode->i_private)) {
@@ -2116,15 +2110,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
                spin_unlock(&inode->i_lock);
        }
 
-       sgp = SGP_CACHE;
-
-       if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-           test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-               sgp = SGP_NOHUGE;
-       else if (vma->vm_flags & VM_HUGEPAGE)
-               sgp = SGP_HUGE;
-
-       err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
+       err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
                                  gfp, vma, vmf, &ret);
        if (err)
                return vmf_error(err);
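
Note: with the per-VMA decision folded into shmem_is_huge(), the opt-outs it checks (VM_NOHUGEPAGE and MMF_DISABLE_THP) are still set by the usual userspace knobs. A minimal sketch, not part of this patch (the helper name is invented for illustration):

    #include <sys/mman.h>
    #include <sys/prctl.h>

    /* Keep faults on this shmem mapping at order 0 even when the mount says huge. */
    static void shmem_opt_out_of_thp(char *addr, size_t len)
    {
            madvise(addr, len, MADV_NOHUGEPAGE);        /* per-VMA: sets VM_NOHUGEPAGE */
            prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0);      /* per-process: sets MMF_DISABLE_THP */
    }
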
@@ -2471,7 +2457,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t index = pos >> PAGE_SHIFT;
 
-       /* i_mutex is held by caller */
+       /* i_rwsem is held by caller */
        if (unlikely(info->seals & (F_SEAL_GROW |
                                   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
@@ -2571,7 +2557,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 
                /*
                 * We must evaluate after, since reads (unlike writes)
-                * are called without i_mutex protection against truncate
+                * are called without i_rwsem protection against truncate
                 */
                nr = PAGE_SIZE;
                i_size = i_size_read(inode);
@@ -2641,7 +2627,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
                return -ENXIO;
 
        inode_lock(inode);
-       /* We're holding i_mutex so we can access i_size directly */
+       /* We're holding i_rwsem so we can access i_size directly */
        offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);
        if (offset >= 0)
                offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
@@ -2656,7 +2642,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_falloc shmem_falloc;
-       pgoff_t start, index, end;
+       pgoff_t start, index, end, undo_fallocend;
        int error;
 
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -2670,7 +2656,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
                DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
-               /* protected by i_mutex */
+               /* protected by i_rwsem */
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
                        error = -EPERM;
                        goto out;
@@ -2725,7 +2711,16 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
        inode->i_private = &shmem_falloc;
        spin_unlock(&inode->i_lock);
 
-       for (index = start; index < end; index++) {
+       /*
+        * info->fallocend is only relevant when huge pages might be
+        * involved: to prevent split_huge_page() freeing fallocated
+        * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size.
+        */
+       undo_fallocend = info->fallocend;
+       if (info->fallocend < end)
+               info->fallocend = end;
+
+       for (index = start; index < end; ) {
                struct page *page;
 
                /*
@@ -2739,6 +2734,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                else
                        error = shmem_getpage(inode, index, &page, SGP_FALLOC);
                if (error) {
+                       info->fallocend = undo_fallocend;
                        /* Remove the !PageUptodate pages we added */
                        if (index > start) {
                                shmem_undo_range(inode,
@@ -2748,13 +2744,26 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                        goto undone;
                }
 
+               index++;
+               /*
+                * Here is a more important optimization than it appears:
+                * a second SGP_FALLOC on the same huge page will clear it,
+                * making it PageUptodate and un-undoable if we fail later.
+                */
+               if (PageTransCompound(page)) {
+                       index = round_up(index, HPAGE_PMD_NR);
+                       /* Beware 32-bit wraparound */
+                       if (!index)
+                               index--;
+               }
+
                /*
                 * Inform shmem_writepage() how far we have reached.
                 * No need for lock or barrier: we have the page lock.
                 */
-               shmem_falloc.next++;
                if (!PageUptodate(page))
-                       shmem_falloc.nr_falloced++;
+                       shmem_falloc.nr_falloced += index - shmem_falloc.next;
+               shmem_falloc.next = index;
 
                /*
                 * If !PageUptodate, leave it that way so that freeable pages
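
Note: the fallocend bookkeeping above matters for exactly the FALLOC_FL_KEEP_SIZE case, where fallocated pages sit beyond i_size. A minimal userspace sketch of that situation, not part of this patch (the memfd name and sizes are illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = memfd_create("keep-size-demo", 0);

            if (fd < 0 || ftruncate(fd, 1UL << 20) < 0)
                    return 1;
            /*
             * i_size stays at 1MB, but pages are fallocated out to 8MB:
             * info->fallocend records that end, so a later split_huge_page()
             * does not free the still-!Uptodate fallocated tail.
             */
            if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 8UL << 20) < 0)
                    return 1;
            return 0;
    }
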
@@ -3909,7 +3918,7 @@ int __init shmem_init(void)
        if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
                SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
        else
-               shmem_huge = 0; /* just in case it was patched */
+               shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
 #endif
        return 0;
 
@@ -3978,42 +3987,6 @@ struct kobj_attribute shmem_enabled_attr =
        __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-bool shmem_huge_enabled(struct vm_area_struct *vma)
-{
-       struct inode *inode = file_inode(vma->vm_file);
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-       loff_t i_size;
-       pgoff_t off;
-
-       if (!transhuge_vma_enabled(vma, vma->vm_flags))
-               return false;
-       if (shmem_huge == SHMEM_HUGE_FORCE)
-               return true;
-       if (shmem_huge == SHMEM_HUGE_DENY)
-               return false;
-       switch (sbinfo->huge) {
-               case SHMEM_HUGE_NEVER:
-                       return false;
-               case SHMEM_HUGE_ALWAYS:
-                       return true;
-               case SHMEM_HUGE_WITHIN_SIZE:
-                       off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
-                       i_size = round_up(i_size_read(inode), PAGE_SIZE);
-                       if (i_size >= HPAGE_PMD_SIZE &&
-                                       i_size >> PAGE_SHIFT >= off)
-                               return true;
-                       fallthrough;
-               case SHMEM_HUGE_ADVISE:
-                       /* TODO: implement fadvise() hints */
-                       return (vma->vm_flags & VM_HUGEPAGE);
-               default:
-                       VM_BUG_ON(1);
-                       return false;
-       }
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
 #else /* !CONFIG_SHMEM */
 
 /*