Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 63e0ebd..f924250 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -251,6 +251,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
        const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
                                    sizeof(struct mlx5_mtt)) - 1;
        u64 idx = 0, blk_start_idx = 0;
+       u64 invalidations = 0;
        unsigned long start;
        unsigned long end;
        int in_block = 0;
@@ -292,6 +293,9 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
                                blk_start_idx = idx;
                                in_block = 1;
                        }
+
+                       /* Count each resident page invalidated in this range */
+                       invalidations++;
                } else {
                        u64 umr_offset = idx & umr_block_mask;
 
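
For orientation, the counting sits inside the per-page loop of mlx5_ib_invalidate_range(): each page in the range that is still resident (ODP_READ_ALLOWED_BIT set) bumps the counter, and runs of resident pages are batched into blocks that are zapped with one UMR each. A condensed sketch of that loop, abridged from the surrounding code rather than quoted verbatim:

	for (addr = start; addr < end; addr += BIT(page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> page_shift;

		if (umem_odp->dma_list[idx] & ODP_READ_ALLOWED_BIT) {
			if (!in_block) {
				blk_start_idx = idx;	/* open a block */
				in_block = 1;
			}
			invalidations++;	/* one resident page zapped */
		} else if (in_block && (idx & umr_block_mask) == 0) {
			/* block closed on a UMR boundary: zap it */
			mlx5_ib_update_xlt(mr, blk_start_idx,
					   idx - blk_start_idx, 0,
					   MLX5_IB_UPD_XLT_ZAP |
					   MLX5_IB_UPD_XLT_ATOMIC);
			in_block = 0;
		}
	}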
@@ -309,6 +313,9 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
                                   idx - blk_start_idx + 1, 0,
                                   MLX5_IB_UPD_XLT_ZAP |
                                   MLX5_IB_UPD_XLT_ATOMIC);
+
+       mlx5_update_odp_stats(mr, invalidations, invalidations);
+
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
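
Note that invalidations is deliberately a plain local u64: the loop runs once per page, so the count is accumulated without atomics and published with a single atomic add once per notifier invocation. mlx5_update_odp_stats() is the small helper this series adds in mlx5_ib.h; modulo formatting it boils down to an atomic64 add (sketch, assuming the counters hang off the MR as mr->odp_stats):

	/* Sketch of the helper from mlx5_ib.h: one atomic64 add per call */
	#define mlx5_update_odp_stats(mr, counter_name, value)		\
		atomic64_add(value, &((mr)->odp_stats.counter_name))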
@@ -392,8 +399,6 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
            MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
            !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
                caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
-
-       return;
 }
 
 static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
@@ -509,6 +514,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
        imr->ibmr.lkey = imr->mmkey.key;
        imr->ibmr.rkey = imr->mmkey.key;
        imr->umem = &umem_odp->umem;
+       imr->is_odp_implicit = true;
        atomic_set(&imr->num_deferred_work, 0);
        xa_init(&imr->implicit_children);
 
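
Implicit ODP MRs cover the whole process address space and are populated lazily, so the new flag gives later diagnostic and reporting code a cheap way to tell them apart from regular user MRs without re-deriving that from the umem. A hypothetical consumer, purely for illustration (the helper name is invented here, not part of this patch):

	/* Illustrative only: classify an MR for stats/restrack output */
	static const char *mlx5_mr_type_str(const struct mlx5_ib_mr *mr)
	{
		return mr->is_odp_implicit ? "implicit_odp" : "user";
	}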
@@ -866,6 +872,13 @@ next_mr:
                if (ret < 0)
                        goto srcu_unlock;
 
+               /*
+                * When prefetching a page, a page fault is generated
+                * in order to bring the page into main memory.
+                * In the current flow, these page faults are counted.
+                */
+               mlx5_update_odp_stats(mr, faults, ret);
+
                npages += ret;
                ret = 0;
                break;
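
Here ret is the number of pages resolved by the fault handler for the current MR, so the faults counter advances at page granularity, symmetric with the invalidations counter above. Both counters plausibly live in an atomic64 pair on the MR; a sketch of such a structure (field names inferred from the counter names used in the mlx5_update_odp_stats() calls):

	/* Sketch: per-MR ODP counters, atomic64 so the fault and
	 * invalidation paths can update them without extra locking. */
	struct ib_odp_counters {
		atomic64_t faults;		/* pages brought in by faults */
		atomic64_t invalidations;	/* pages zapped by notifiers */
	};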