dm btree: prefetch child nodes when walking tree for a dm_btree_del
author Joe Thornber <ejt@redhat.com>
Fri, 9 Aug 2013 11:59:30 +0000 (12:59 +0100)
committer Mike Snitzer <snitzer@redhat.com>
Fri, 23 Aug 2013 13:02:14 +0000 (09:02 -0400)
dm-btree now takes advantage of dm-bufio's ability to prefetch data via
dm_bm_prefetch().  Prior to this change many btree node visits caused a
synchronous read.
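
As a rough illustration (not part of the patch itself), the tree walk now
behaves along these lines when it pushes an internal node onto the del_stack.
The helper name visit_internal_node is made up for this sketch, but
dm_bm_prefetch(), dm_tm_get_bm(), value64() and dm_tm_read_lock() are the
interfaces used in the changes below:

	/*
	 * Illustrative sketch only, simplified from the patch below.
	 * Kick off asynchronous reads for every child of an internal node
	 * so that the later per-child dm_tm_read_lock() is likely to hit
	 * dm-bufio's cache instead of blocking on a synchronous read.
	 */
	static void visit_internal_node(struct del_stack *s, struct frame *f)
	{
		unsigned i;
		struct dm_block_manager *bm = dm_tm_get_bm(s->tm);

		for (i = 0; i < f->nr_children; i++)
			dm_bm_prefetch(bm, value64(f->n, i));
	}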

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
drivers/md/persistent-data/dm-block-manager.c
drivers/md/persistent-data/dm-block-manager.h
drivers/md/persistent-data/dm-btree.c

diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 81b5138..a7e8bf2 100644
@@ -615,6 +615,11 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
 }
 EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
 
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
+{
+       dm_bufio_prefetch(bm->bufio, b, 1);
+}
+
 void dm_bm_set_read_only(struct dm_block_manager *bm)
 {
        bm->read_only = true;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index be5bff6..9a82083 100644
@@ -108,6 +108,11 @@ int dm_bm_unlock(struct dm_block *b);
 int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
                           struct dm_block *superblock);
 
+/*
+ * Request data be prefetched into the cache.
+ */
+void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
+
 /*
  * Switches the bm to a read only mode.  Once read-only mode
  * has been entered the following functions will return -EPERM.
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index e872996..468e371 100644
@@ -161,6 +161,7 @@ struct frame {
 };
 
 struct del_stack {
+       struct dm_btree_info *info;
        struct dm_transaction_manager *tm;
        int top;
        struct frame spine[MAX_SPINE_DEPTH];
@@ -183,6 +184,20 @@ static int unprocessed_frames(struct del_stack *s)
        return s->top >= 0;
 }
 
+static void prefetch_children(struct del_stack *s, struct frame *f)
+{
+       unsigned i;
+       struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+
+       for (i = 0; i < f->nr_children; i++)
+               dm_bm_prefetch(bm, value64(f->n, i));
+}
+
+static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+{
+       return f->level < (info->levels - 1);
+}
+
 static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
 {
        int r;
@@ -205,6 +220,7 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
                dm_tm_dec(s->tm, b);
 
        else {
+               uint32_t flags;
                struct frame *f = s->spine + ++s->top;
 
                r = dm_tm_read_lock(s->tm, b, &btree_node_validator, &f->b);
@@ -217,6 +233,10 @@ static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
                f->level = level;
                f->nr_children = le32_to_cpu(f->n->header.nr_entries);
                f->current_child = 0;
+
+               flags = le32_to_cpu(f->n->header.flags);
+               if (flags & INTERNAL_NODE || is_internal_level(s->info, f))
+                       prefetch_children(s, f);
        }
 
        return 0;
@@ -230,11 +250,6 @@ static void pop_frame(struct del_stack *s)
        dm_tm_unlock(s->tm, f->b);
 }
 
-static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
-{
-       return f->level < (info->levels - 1);
-}
-
 int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
 {
        int r;
@@ -243,6 +258,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;
+       s->info = info;
        s->tm = info->tm;
        s->top = -1;