RDMA/odp: Iterate over the whole rbtree directly
author Jason Gunthorpe <jgg@mellanox.com>
Mon, 19 Aug 2019 11:17:00 +0000 (14:17 +0300)
committer Jason Gunthorpe <jgg@mellanox.com>
Wed, 21 Aug 2019 17:08:24 +0000 (14:08 -0300)
Instead of intersecting the interval tree with the full [0, ULLONG_MAX] range, just iterate over every element directly. This is faster and clearer.

Link: https://lore.kernel.org/r/20190819111710.18440-3-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/mlx5/odp.c

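For context, a minimal sketch of the iteration pattern the patch adopts, using the kernel rbtree helpers rb_first_cached(), rb_next() and rb_entry() from <linux/rbtree.h>. The names walk_tree_entry, walk_all_entries and handle_entry are hypothetical stand-ins for ib_umem_odp and the per-entry work done in the hunks below; they are not part of the patch.

#include <linux/rbtree.h>
#include <linux/interval_tree.h>

/* Hypothetical entry type; in the patch this role is played by ib_umem_odp,
 * whose interval_tree.rb node sits in per_mm->umem_tree. */
struct walk_tree_entry {
	struct interval_tree_node interval_tree;	/* embeds struct rb_node rb */
};

void walk_all_entries(struct rb_root_cached *root)
{
	struct rb_node *node;

	/* Plain in-order walk of every node, instead of asking the interval
	 * tree for all entries overlapping [0, ULLONG_MAX]. */
	for (node = rb_first_cached(root); node; node = rb_next(node)) {
		struct walk_tree_entry *entry =
			rb_entry(node, struct walk_tree_entry, interval_tree.rb);

		/* handle_entry(entry); -- per-entry work goes here */
	}
}

The helper being removed, rbt_ib_umem_for_each_in_range(), walks the augmented interval tree checking overlap against [0, ULLONG_MAX] and invokes a callback per match; when every node is wanted anyway, the plain rb_first_cached()/rb_next() walk visits the same nodes with less machinery, which is why the trampoline callbacks can be folded directly into their callers below.
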
drivers/infiniband/core/umem_odp.c
index 6c17a7c..77adf40 100644
@@ -72,31 +72,34 @@ static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
        mutex_unlock(&umem_odp->umem_mutex);
 }
 
-static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
-                                              u64 start, u64 end, void *cookie)
-{
-       /*
-        * Increase the number of notifiers running, to
-        * prevent any further fault handling on this MR.
-        */
-       ib_umem_notifier_start_account(umem_odp);
-       complete_all(&umem_odp->notifier_completion);
-       umem_odp->umem.context->invalidate_range(
-               umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
-       return 0;
-}
-
 static void ib_umem_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
 {
        struct ib_ucontext_per_mm *per_mm =
                container_of(mn, struct ib_ucontext_per_mm, mn);
+       struct rb_node *node;
 
        down_read(&per_mm->umem_rwsem);
-       if (per_mm->active)
-               rbt_ib_umem_for_each_in_range(
-                       &per_mm->umem_tree, 0, ULLONG_MAX,
-                       ib_umem_notifier_release_trampoline, true, NULL);
+       if (!per_mm->active)
+               goto out;
+
+       for (node = rb_first_cached(&per_mm->umem_tree); node;
+            node = rb_next(node)) {
+               struct ib_umem_odp *umem_odp =
+                       rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+
+               /*
+                * Increase the number of notifiers running, to prevent any
+                * further fault handling on this MR.
+                */
+               ib_umem_notifier_start_account(umem_odp);
+               complete_all(&umem_odp->notifier_completion);
+               umem_odp->umem.context->invalidate_range(
+                       umem_odp, ib_umem_start(umem_odp),
+                       ib_umem_end(umem_odp));
+       }
+
+out:
        up_read(&per_mm->umem_rwsem);
 }
 
@@ -756,4 +759,3 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
 
        return ret_val;
 }
-EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);

drivers/infiniband/hw/mlx5/odp.c
index 1d257d1..82b716a 100644
@@ -539,34 +539,31 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
        return imr;
 }
 
-static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
-                       void *cookie)
+void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 {
-       struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
-
-       if (mr->parent != imr)
-               return 0;
-
-       ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
-                                   ib_umem_end(umem_odp));
+       struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
+       struct rb_node *node;
 
-       if (umem_odp->dying)
-               return 0;
+       down_read(&per_mm->umem_rwsem);
+       for (node = rb_first_cached(&per_mm->umem_tree); node;
+            node = rb_next(node)) {
+               struct ib_umem_odp *umem_odp =
+                       rb_entry(node, struct ib_umem_odp, interval_tree.rb);
+               struct mlx5_ib_mr *mr = umem_odp->private;
 
-       WRITE_ONCE(umem_odp->dying, 1);
-       atomic_inc(&imr->num_leaf_free);
-       schedule_work(&umem_odp->work);
+               if (mr->parent != imr)
+                       continue;
 
-       return 0;
-}
+               ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+                                           ib_umem_end(umem_odp));
 
-void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
-{
-       struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
+               if (umem_odp->dying)
+                       continue;
 
-       down_read(&per_mm->umem_rwsem);
-       rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
-                                     mr_leaf_free, true, imr);
+               WRITE_ONCE(umem_odp->dying, 1);
+               atomic_inc(&imr->num_leaf_free);
+               schedule_work(&umem_odp->work);
+       }
        up_read(&per_mm->umem_rwsem);
 
        wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));