mm, truncate: remove all exceptional entries from pagevec under one lock
author    Mel Gorman <mgorman@techsingularity.net>
          Thu, 16 Nov 2017 01:37:44 +0000 (17:37 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 16 Nov 2017 02:21:06 +0000 (18:21 -0800)
During truncate, each entry in a pagevec is checked to see whether it is an
exceptional entry and, if so, the shadow entry is cleaned up.  This is
potentially expensive because every exceptional entry belonging to a mapping
locks and unlocks the tree lock separately.  This patch batches the operation
so that all exceptional entries removed from a pagevec acquire the mapping
tree lock only once.  The corner case where batching is more expensive is when
there is only a single exceptional entry, but that is unlikely due to temporal
locality and how it affects LRU ordering.  Note that for truncations of small,
recently created files this patch should show no gain because it only batches
the handling of exceptional entries.
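
In outline, the new helper truncate_exceptional_pvec_entries() takes the tree
lock at most once per pagevec and clears every shadow entry under that single
lock.  The full version is in the diff below; this is only a condensed sketch
of the locking pattern, not the exact code:

	dax  = dax_mapping(mapping);
	lock = !dax && indices[j] < end;	/* j = first exceptional entry */
	if (lock)
		spin_lock_irq(&mapping->tree_lock);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (!radix_tree_exceptional_entry(page)) {
			pvec->pages[j++] = page;	/* keep regular pages */
			continue;
		}
		if (indices[i] >= end)
			continue;
		if (unlikely(dax))
			dax_delete_mapping_entry(mapping, indices[i]);
		else
			__clear_shadow_entry(mapping, indices[i], page);
	}

	if (lock)
		spin_unlock_irq(&mapping->tree_lock);
	pvec->nr = j;	/* exceptional entries dropped from the pagevec */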

sparsetruncate (large)
                              4.14.0-rc4             4.14.0-rc4
                         pickhelper-v1r1       batchshadow-v1r1
Min          Time       38.00 (   0.00%)       27.00 (  28.95%)
1st-qrtle    Time       40.00 (   0.00%)       28.00 (  30.00%)
2nd-qrtle    Time       44.00 (   0.00%)       41.00 (   6.82%)
3rd-qrtle    Time      146.00 (   0.00%)      147.00 (  -0.68%)
Max-90%      Time      153.00 (   0.00%)      153.00 (   0.00%)
Max-95%      Time      155.00 (   0.00%)      156.00 (  -0.65%)
Max-99%      Time      181.00 (   0.00%)      171.00 (   5.52%)
Amean        Time       93.04 (   0.00%)       88.43 (   4.96%)
Best99%Amean Time       92.08 (   0.00%)       86.13 (   6.46%)
Best95%Amean Time       89.19 (   0.00%)       83.13 (   6.80%)
Best90%Amean Time       85.60 (   0.00%)       79.15 (   7.53%)
Best75%Amean Time       72.95 (   0.00%)       65.09 (  10.78%)
Best50%Amean Time       39.86 (   0.00%)       28.20 (  29.25%)
Best25%Amean Time       39.44 (   0.00%)       27.70 (  29.77%)

bonnie
                                      4.14.0-rc4             4.14.0-rc4
                                 pickhelper-v1r1       batchshadow-v1r1
Hmean     SeqCreate ops         71.92 (   0.00%)       76.78 (   6.76%)
Hmean     SeqCreate read        42.42 (   0.00%)       45.01 (   6.10%)
Hmean     SeqCreate del      26519.88 (   0.00%)    27191.87 (   2.53%)
Hmean     RandCreate ops        71.92 (   0.00%)       76.95 (   7.00%)
Hmean     RandCreate read       44.44 (   0.00%)       49.23 (  10.78%)
Hmean     RandCreate del     24948.62 (   0.00%)    24764.97 (  -0.74%)

Truncation of a large number of files shows a substantial gain, with 99% of
files being truncated 6.46% faster on average.  bonnie shows a modest gain of
2.53% on sequential deletes.

[jack@suse.cz: fix truncate_exceptional_pvec_entries()]
Link: http://lkml.kernel.org/r/20171108164226.26788-1-jack@suse.cz
Link: http://lkml.kernel.org/r/20171018075952.10627-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/truncate.c

index 02a0c04..c30e8fa 100644
 #include <linux/rmap.h>
 #include "internal.h"
 
-static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
-                              void *entry)
+/*
+ * Regular page slots are stabilized by the page lock even without the tree
+ * itself locked.  These unlocked entries need verification under the tree
+ * lock.
+ */
+static inline void __clear_shadow_entry(struct address_space *mapping,
+                               pgoff_t index, void *entry)
 {
        struct radix_tree_node *node;
        void **slot;
 
-       spin_lock_irq(&mapping->tree_lock);
-       /*
-        * Regular page slots are stabilized by the page lock even
-        * without the tree itself locked.  These unlocked entries
-        * need verification under the tree lock.
-        */
        if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
-               goto unlock;
+               return;
        if (*slot != entry)
-               goto unlock;
+               return;
        __radix_tree_replace(&mapping->page_tree, node, slot, NULL,
                             workingset_update_node);
        mapping->nrexceptional--;
-unlock:
+}
+
+static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
+                              void *entry)
+{
+       spin_lock_irq(&mapping->tree_lock);
+       __clear_shadow_entry(mapping, index, entry);
        spin_unlock_irq(&mapping->tree_lock);
 }
 
 /*
- * Unconditionally remove exceptional entry. Usually called from truncate path.
+ * Unconditionally remove exceptional entries. Usually called from truncate
+ * path. Note that the pagevec may be altered by this function by removing
+ * exceptional entries similar to what pagevec_remove_exceptionals does.
  */
-static void truncate_exceptional_entry(struct address_space *mapping,
-                                      pgoff_t index, void *entry)
+static void truncate_exceptional_pvec_entries(struct address_space *mapping,
+                               struct pagevec *pvec, pgoff_t *indices,
+                               pgoff_t end)
 {
+       int i, j;
+       bool dax, lock;
+
        /* Handled by shmem itself */
        if (shmem_mapping(mapping))
                return;
 
-       if (dax_mapping(mapping)) {
-               dax_delete_mapping_entry(mapping, index);
+       for (j = 0; j < pagevec_count(pvec); j++)
+               if (radix_tree_exceptional_entry(pvec->pages[j]))
+                       break;
+
+       if (j == pagevec_count(pvec))
                return;
+
+       dax = dax_mapping(mapping);
+       lock = !dax && indices[j] < end;
+       if (lock)
+               spin_lock_irq(&mapping->tree_lock);
+
+       for (i = j; i < pagevec_count(pvec); i++) {
+               struct page *page = pvec->pages[i];
+               pgoff_t index = indices[i];
+
+               if (!radix_tree_exceptional_entry(page)) {
+                       pvec->pages[j++] = page;
+                       continue;
+               }
+
+               if (index >= end)
+                       continue;
+
+               if (unlikely(dax)) {
+                       dax_delete_mapping_entry(mapping, index);
+                       continue;
+               }
+
+               __clear_shadow_entry(mapping, index, page);
        }
-       clear_shadow_entry(mapping, index, entry);
+
+       if (lock)
+               spin_unlock_irq(&mapping->tree_lock);
+       pvec->nr = j;
 }
 
 /*
@@ -310,11 +351,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        if (index >= end)
                                break;
 
-                       if (radix_tree_exceptional_entry(page)) {
-                               truncate_exceptional_entry(mapping, index,
-                                                          page);
+                       if (radix_tree_exceptional_entry(page))
                                continue;
-                       }
 
                        if (!trylock_page(page))
                                continue;
@@ -334,12 +372,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
                delete_from_page_cache_batch(mapping, &locked_pvec);
                for (i = 0; i < pagevec_count(&locked_pvec); i++)
                        unlock_page(locked_pvec.pages[i]);
-               pagevec_remove_exceptionals(&pvec);
+               truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }
-
        if (partial_start) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
@@ -397,6 +434,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        pagevec_release(&pvec);
                        break;
                }
+
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
 
@@ -408,11 +446,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                break;
                        }
 
-                       if (radix_tree_exceptional_entry(page)) {
-                               truncate_exceptional_entry(mapping, index,
-                                                          page);
+                       if (radix_tree_exceptional_entry(page))
                                continue;
-                       }
 
                        lock_page(page);
                        WARN_ON(page_to_index(page) != index);
@@ -420,7 +455,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        truncate_inode_page(mapping, page);
                        unlock_page(page);
                }
-               pagevec_remove_exceptionals(&pvec);
+               truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
                pagevec_release(&pvec);
                index++;
        }