2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <rdma/ib_umem.h>
34 #include <rdma/ib_umem_odp.h>
35 #include <linux/kernel.h>
40 #include <linux/mlx5/eq.h>
42 /* Contains the details of a pagefault. */
43 struct mlx5_pagefault {
49 /* Initiator or send message responder pagefault details. */
51 /* Received packet size, only valid for responders. */
54 * Number of the resource holding the WQE; depends on the fault type. */
58 * WQE index. Refers to either the send queue or
59 * receive queue, according to event_subtype.
63 /* RDMA responder pagefault details */
67 * Received packet size; resolving at least this much is required
68 * for forward progress. */
76 struct mlx5_ib_pf_eq *eq;
77 struct work_struct work;
80 #define MAX_PREFETCH_LEN (4*1024*1024U)
82 /* Timeout in ms to wait for an active mmu notifier to complete when handling a pagefault. */
84 #define MMU_NOTIFIER_TIMEOUT 1000
86 #define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
87 #define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
88 #define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
89 #define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
90 #define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
92 #define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
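/*
 * Illustrative note (added): with 4K pages (PAGE_SHIFT == 12) each child of
 * an implicit MR covers MLX5_IMR_MTT_SIZE = 1GB of virtual address space,
 * and the child backing a given VA is found with
 *
 *	idx = va >> MLX5_IMR_MTT_SHIFT;
 *
 * the same index used for imr->implicit_children and for the parent KSM entry.
 */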
94 static u64 mlx5_imr_ksm_entries;
96 void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
97 struct mlx5_ib_mr *imr, int flags)
99 struct mlx5_klm *end = pklm + nentries;
101 if (flags & MLX5_IB_UPD_XLT_ZAP) {
102 for (; pklm != end; pklm++, idx++) {
103 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
104 pklm->key = cpu_to_be32(imr->dev->null_mkey);
111 * The locking here is pretty subtle. Ideally the implicit_children
112 * xarray would be protected by the umem_mutex, however that is not
113 * possible. Instead this uses a weaker update-then-lock pattern:
117 * mutex_lock(umem_mutex)
118 * mlx5_ib_update_xlt()
119 * mutex_unlock(umem_mutex)
122 * i.e. any change to the xarray must be followed by the locked update_xlt
125 * The umem_mutex provides the acquire/release semantics needed to make
126 * the xa_store() visible to a racing thread. While SRCU is not
127 * technically required, using it keeps the locking around the xarray
128 * consistent with the rest of the SRCU usage. */
130 lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
131 lockdep_assert_held(&imr->dev->odp_srcu);
133 for (; pklm != end; pklm++, idx++) {
134 struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
136 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
138 pklm->key = cpu_to_be32(mtt->ibmr.lkey);
139 pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
141 pklm->key = cpu_to_be32(imr->dev->null_mkey);
148 * This must be called after the MR has been removed from implicit_children
149 * and odp_mkeys and the SRCU synchronized. NOTE: The MR does not necessarily
150 * have to be empty here; parallel page faults could have raced with the free
151 * process and added pages to it. */
153 static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
155 struct mlx5_ib_mr *imr = mr->parent;
156 struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
157 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
158 unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
161 /* implicit child MRs are not allowed to have deferred work */
162 WARN_ON(atomic_read(&mr->num_deferred_work));
165 srcu_key = srcu_read_lock(&mr->dev->odp_srcu);
166 mutex_lock(&odp_imr->umem_mutex);
167 mlx5_ib_update_xlt(mr->parent, idx, 1, 0,
168 MLX5_IB_UPD_XLT_INDIRECT |
169 MLX5_IB_UPD_XLT_ATOMIC);
170 mutex_unlock(&odp_imr->umem_mutex);
171 srcu_read_unlock(&mr->dev->odp_srcu, srcu_key);
175 mlx5_mr_cache_free(mr->dev, mr);
176 ib_umem_odp_release(odp);
177 atomic_dec(&imr->num_deferred_work);
180 static void free_implicit_child_mr_work(struct work_struct *work)
182 struct mlx5_ib_mr *mr =
183 container_of(work, struct mlx5_ib_mr, odp_destroy.work);
185 free_implicit_child_mr(mr, true);
188 static void free_implicit_child_mr_rcu(struct rcu_head *head)
190 struct mlx5_ib_mr *mr =
191 container_of(head, struct mlx5_ib_mr, odp_destroy.rcu);
193 /* Freeing an MR is a sleeping operation, so bounce to a work queue */
194 INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
195 queue_work(system_unbound_wq, &mr->odp_destroy.work);
198 static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
200 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
201 unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
202 struct mlx5_ib_mr *imr = mr->parent;
204 xa_lock(&imr->implicit_children);
206 * This can race with mlx5_ib_free_implicit_mr(); the first one to
207 * reach the xa_lock wins the race and destroys the MR. */
209 if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
213 __xa_erase(&mr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
214 atomic_inc(&imr->num_deferred_work);
215 call_srcu(&mr->dev->odp_srcu, &mr->odp_destroy.rcu,
216 free_implicit_child_mr_rcu);
219 xa_unlock(&imr->implicit_children);
222 void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
225 struct mlx5_ib_mr *mr;
226 const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
227 sizeof(struct mlx5_mtt)) - 1;
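/*
 * Note (assumption, added): with the usual 64-byte MLX5_UMR_MTT_ALIGNMENT and
 * 8-byte struct mlx5_mtt, umr_block_mask is 7, so the loop below flushes the
 * ZAP updates in runs of 8 naturally aligned MTT entries rather than issuing
 * one UMR per page.
 */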
228 u64 idx = 0, blk_start_idx = 0;
233 pr_err("invalidation called on NULL umem or non-ODP umem\n");
237 mr = umem_odp->private;
239 if (!mr || !mr->ibmr.pd)
242 start = max_t(u64, ib_umem_start(umem_odp), start);
243 end = min_t(u64, ib_umem_end(umem_odp), end);
246 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
247 * while we are doing the invalidation, no page fault will attempt to
248 * overwrite the same MTTs. Concurrent invalidations might race us,
249 * but they will write 0s as well, so no difference in the end result.
251 mutex_lock(&umem_odp->umem_mutex);
252 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
253 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
255 * Strive to write the MTTs in chunks, but avoid overwriting
256 * non-existing MTTs. The heuristic here can be improved to
257 * estimate the cost of another UMR vs. the cost of a bigger UMR. */
260 if (umem_odp->dma_list[idx] &
261 (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
267 u64 umr_offset = idx & umr_block_mask;
269 if (in_block && umr_offset == 0) {
270 mlx5_ib_update_xlt(mr, blk_start_idx,
271 idx - blk_start_idx, 0,
272 MLX5_IB_UPD_XLT_ZAP |
273 MLX5_IB_UPD_XLT_ATOMIC);
279 mlx5_ib_update_xlt(mr, blk_start_idx,
280 idx - blk_start_idx + 1, 0,
281 MLX5_IB_UPD_XLT_ZAP |
282 MLX5_IB_UPD_XLT_ATOMIC);
284 * We are now sure that the device will not access the
285 * memory. We can safely unmap it, and mark it as dirty if needed. */
289 ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
291 if (unlikely(!umem_odp->npages && mr->parent))
292 destroy_unused_implicit_child_mr(mr);
293 mutex_unlock(&umem_odp->umem_mutex);
296 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
298 struct ib_odp_caps *caps = &dev->odp_caps;
300 memset(caps, 0, sizeof(*caps));
302 if (!MLX5_CAP_GEN(dev->mdev, pg) ||
303 !mlx5_ib_can_use_umr(dev, true))
306 caps->general_caps = IB_ODP_SUPPORT;
308 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
309 dev->odp_max_size = U64_MAX;
311 dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
313 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
314 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
316 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
317 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
319 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
320 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
322 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
323 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
325 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
326 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
328 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
329 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
331 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
332 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
334 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
335 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
337 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
338 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
340 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
341 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
343 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
344 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
346 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
347 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
349 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
350 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
352 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
353 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
355 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
356 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
357 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
358 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
359 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
364 static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
365 struct mlx5_pagefault *pfault,
368 int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
369 pfault->wqe.wq_num : pfault->token;
370 u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
371 u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
374 MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
375 MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
376 MLX5_SET(page_fault_resume_in, in, token, pfault->token);
377 MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
378 MLX5_SET(page_fault_resume_in, in, error, !!error);
380 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
382 mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
386 static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
389 struct ib_umem_odp *odp;
390 struct mlx5_ib_mr *mr;
391 struct mlx5_ib_mr *ret;
394 odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
395 idx * MLX5_IMR_MTT_SIZE,
398 return ERR_CAST(odp);
400 ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY);
404 err = xa_reserve(&imr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
411 mr->ibmr.pd = imr->ibmr.pd;
412 mr->access_flags = imr->access_flags;
413 mr->umem = &odp->umem;
414 mr->ibmr.lkey = mr->mmkey.key;
415 mr->ibmr.rkey = mr->mmkey.key;
416 mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
420 err = mlx5_ib_update_xlt(mr, 0,
421 MLX5_IMR_MTT_ENTRIES,
423 MLX5_IB_UPD_XLT_ZAP |
424 MLX5_IB_UPD_XLT_ENABLE);
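/*
 * Note (added): the child mkey is enabled here with a zeroed translation.
 * The hardware can only reach it through the parent's KSM, which the caller
 * updates under odp_imr->umem_mutex (see pagefault_implicit_mr()).
 */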
431 * Once the store to either xarray completes, any error unwind has to
432 * use synchronize_srcu(). Avoid this with xa_reserve().
434 ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
437 xa_store(&imr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
438 &mr->mmkey, GFP_ATOMIC);
440 if (xa_is_err(ret)) {
441 ret = ERR_PTR(xa_err(ret));
445 * Another thread beat us to creating the child mr, use theirs. */
451 mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
455 xa_release(&imr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key));
457 mlx5_mr_cache_free(imr->dev, mr);
459 ib_umem_odp_release(odp);
463 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
464 struct ib_udata *udata,
467 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
468 struct ib_umem_odp *umem_odp;
469 struct mlx5_ib_mr *imr;
472 umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
473 if (IS_ERR(umem_odp))
474 return ERR_CAST(umem_odp);
476 imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY);
482 imr->ibmr.pd = &pd->ibpd;
483 imr->access_flags = access_flags;
485 imr->umem = &umem_odp->umem;
486 imr->ibmr.lkey = imr->mmkey.key;
487 imr->ibmr.rkey = imr->mmkey.key;
488 imr->umem = &umem_odp->umem;
489 atomic_set(&imr->num_deferred_work, 0);
490 xa_init(&imr->implicit_children);
492 err = mlx5_ib_update_xlt(imr, 0,
493 mlx5_imr_ksm_entries,
495 MLX5_IB_UPD_XLT_INDIRECT |
496 MLX5_IB_UPD_XLT_ZAP |
497 MLX5_IB_UPD_XLT_ENABLE);
501 err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
502 &imr->mmkey, GFP_KERNEL));
506 mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
509 mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
510 mlx5_mr_cache_free(dev, imr);
512 ib_umem_odp_release(umem_odp);
516 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
518 struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
519 struct mlx5_ib_dev *dev = imr->dev;
520 struct list_head destroy_list;
521 struct mlx5_ib_mr *mtt;
522 struct mlx5_ib_mr *tmp;
525 INIT_LIST_HEAD(&destroy_list);
527 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
529 * This stops the SRCU protected page fault path from touching either
530 * the imr or any children. The page fault path can only reach the
531 * children xarray via the imr.
533 synchronize_srcu(&dev->odp_srcu);
535 xa_lock(&imr->implicit_children);
536 xa_for_each (&imr->implicit_children, idx, mtt) {
537 __xa_erase(&imr->implicit_children, idx);
538 __xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mtt->mmkey.key));
539 list_add(&mtt->odp_destroy.elm, &destroy_list);
541 xa_unlock(&imr->implicit_children);
543 /* Fence access to the child pointers via the pagefault thread */
544 synchronize_srcu(&dev->odp_srcu);
547 * num_deferred_work can only be incremented inside the odp_srcu, or
548 * under xa_lock while the child is in the xarray. Thus at this point
549 * it is only decreasing, and all work holding it is now on the wq.
551 if (atomic_read(&imr->num_deferred_work)) {
552 flush_workqueue(system_unbound_wq);
553 WARN_ON(atomic_read(&imr->num_deferred_work));
556 list_for_each_entry_safe (mtt, tmp, &destroy_list, odp_destroy.elm)
557 free_implicit_child_mr(mtt, false);
559 mlx5_mr_cache_free(dev, imr);
560 ib_umem_odp_release(odp_imr);
563 #define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
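/*
 * Note (added): DOWNGRADE maps pages read-only even on a writable umem (see
 * pagefault_real_mr()). It is set for IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH so
 * that a read prefetch does not mark pages writable.
 */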
564 static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
565 u64 user_va, size_t bcnt, u32 *bytes_mapped,
568 int current_seq, page_shift, ret, np;
569 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
571 u64 start_idx, page_mask;
573 page_shift = odp->page_shift;
574 page_mask = ~(BIT(page_shift) - 1);
575 start_idx = (user_va - (mr->mmkey.iova & page_mask)) >> page_shift;
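/*
 * Note (added): start_idx is the index, in units of the umem page size, of
 * the first faulting page relative to the MR's page-aligned iova; it is the
 * starting offset into this mkey's XLT for the update below.
 */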
576 access_mask = ODP_READ_ALLOWED_BIT;
578 if (odp->umem.writable && !downgrade)
579 access_mask |= ODP_WRITE_ALLOWED_BIT;
581 current_seq = READ_ONCE(odp->notifiers_seq);
583 * Ensure the sequence number is valid for some time before we call gup. */
588 np = ib_umem_odp_map_dma_pages(odp, user_va, bcnt, access_mask,
593 mutex_lock(&odp->umem_mutex);
594 if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
596 * No need to check whether the MTTs really belong to
597 * this MR, since ib_umem_odp_map_dma_pages already checks this. */
600 ret = mlx5_ib_update_xlt(mr, start_idx, np,
601 page_shift, MLX5_IB_UPD_XLT_ATOMIC);
605 mutex_unlock(&odp->umem_mutex);
610 "Failed to update mkey page tables\n");
615 u32 new_mappings = (np << page_shift) -
616 (user_va - round_down(user_va, 1 << page_shift));
618 *bytes_mapped += min_t(u32, new_mappings, bcnt);
621 return np << (page_shift - PAGE_SHIFT);
624 if (ret == -EAGAIN) {
625 unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
627 if (!wait_for_completion_timeout(&odp->notifier_completion,
631 "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
632 current_seq, odp->notifiers_seq,
633 odp->notifiers_count);
640 static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
641 struct ib_umem_odp *odp_imr, u64 user_va,
642 size_t bcnt, u32 *bytes_mapped, u32 flags)
644 unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
645 unsigned long upd_start_idx = end_idx + 1;
646 unsigned long upd_len = 0;
647 unsigned long npages = 0;
651 if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
652 mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
655 /* Fault each child mr that intersects with our interval. */
657 unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
658 struct ib_umem_odp *umem_odp;
659 struct mlx5_ib_mr *mtt;
662 mtt = xa_load(&imr->implicit_children, idx);
663 if (unlikely(!mtt)) {
664 mtt = implicit_get_child_mr(imr, idx);
669 upd_start_idx = min(upd_start_idx, idx);
670 upd_len = idx - upd_start_idx + 1;
673 umem_odp = to_ib_umem_odp(mtt->umem);
674 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
677 ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
678 bytes_mapped, flags);
689 * Any time the implicit_children are changed we must perform an
690 * update of the xlt before exiting to ensure the HW and the
691 * implicit_children remain synchronized. */
694 if (likely(!upd_len))
698 * Note that the ordering here is not strictly correct: the KSM is updated after
699 * the implicit_children is updated, so a parallel page fault could
700 * see a MR that is not yet visible in the KSM. This is similar to a
701 * parallel page fault seeing a MR that is being concurrently removed
702 * from the KSM. Both of these improbable situations are resolved
703 * safely by resuming the HW and then taking another page fault. The
704 * next pagefault handler will see the new information.
706 mutex_lock(&odp_imr->umem_mutex);
707 err = mlx5_ib_update_xlt(imr, upd_start_idx, upd_len, 0,
708 MLX5_IB_UPD_XLT_INDIRECT |
709 MLX5_IB_UPD_XLT_ATOMIC);
710 mutex_unlock(&odp_imr->umem_mutex);
712 mlx5_ib_err(imr->dev, "Failed to update PAS\n");
720 * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
721 * not accessible, or the MR is no longer valid.
722 * -EAGAIN/-ENOMEM: The operation should be retried
724 * -EINVAL/others: General internal malfunction
725 * >0: Number of pages mapped
727 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
728 u32 *bytes_mapped, u32 flags)
730 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
732 if (!odp->is_implicit_odp) {
733 if (unlikely(io_virt < ib_umem_start(odp) ||
734 ib_umem_end(odp) - io_virt < bcnt))
736 return pagefault_real_mr(mr, odp, io_virt, bcnt, bytes_mapped,
739 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
744 struct pf_frame *next;
751 static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
755 if (mmkey->type == MLX5_MKEY_MW)
756 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
757 return mmkey->key == key;
760 static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
762 struct mlx5_ib_mw *mw;
763 struct mlx5_ib_devx_mr *devx_mr;
765 if (mmkey->type == MLX5_MKEY_MW) {
766 mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
770 devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
772 return devx_mr->ndescs;
776 * Handle a single data segment in a page-fault WQE or RDMA region.
778 * Returns number of OS pages retrieved on success. The caller may continue to
779 * the next data segment.
780 * Can return the following error codes:
781 * -EAGAIN to designate a temporary error. The caller will abort handling the
782 * page fault and resolve it.
783 * -EFAULT when there's an error mapping the requested pages. The caller will
784 * abort the page fault handling.
786 static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
787 struct ib_pd *pd, u32 key,
788 u64 io_virt, size_t bcnt,
789 u32 *bytes_committed,
792 int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
793 struct pf_frame *head = NULL, *frame;
794 struct mlx5_core_mkey *mmkey;
795 struct mlx5_ib_mr *mr;
796 struct mlx5_klm *pklm;
801 srcu_key = srcu_read_lock(&dev->odp_srcu);
803 io_virt += *bytes_committed;
804 bcnt -= *bytes_committed;
807 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
811 "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
814 *bytes_mapped += bcnt;
816 * The user could specify a SGL with multiple lkeys and only
817 * some of them are ODP. Treat the non-ODP ones as fully faulted. */
823 if (!mkey_is_eq(mmkey, key)) {
824 mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
829 switch (mmkey->type) {
831 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
833 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
842 case MLX5_MKEY_INDIRECT_DEVX:
843 ndescs = get_indirect_num_descs(mmkey);
845 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
846 mlx5_ib_dbg(dev, "indirection level exceeded\n");
851 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
852 sizeof(*pklm) * (ndescs - 2);
854 if (outlen > cur_outlen) {
856 out = kzalloc(outlen, GFP_KERNEL);
864 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
865 bsf0_klm0_pas_mtt0_1);
867 ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
871 offset = io_virt - MLX5_GET64(query_mkey_out, out,
872 memory_key_mkey_entry.start_addr);
874 for (i = 0; bcnt && i < ndescs; i++, pklm++) {
875 if (offset >= be32_to_cpu(pklm->bcount)) {
876 offset -= be32_to_cpu(pklm->bcount);
880 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
886 frame->key = be32_to_cpu(pklm->key);
887 frame->io_virt = be64_to_cpu(pklm->va) + offset;
888 frame->bcnt = min_t(size_t, bcnt,
889 be32_to_cpu(pklm->bcount) - offset);
890 frame->depth = depth + 1;
900 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
910 io_virt = frame->io_virt;
912 depth = frame->depth;
926 srcu_read_unlock(&dev->odp_srcu, srcu_key);
927 *bytes_committed = 0;
928 return ret ? ret : npages;
932 * Parse a series of data segments for page fault handling.
934 * @pfault contains page fault information.
935 * @wqe points at the first data segment in the WQE.
936 * @wqe_end points after the end of the WQE.
937 * @bytes_mapped receives the number of bytes that the function was able to
938 * map. This allows the caller to decide intelligently whether
939 * enough memory was mapped to resolve the page fault
940 * successfully (e.g. enough for the next MTU, or the entire
942 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
943 * the committed bytes).
945 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
946 * negative error code.
948 static int pagefault_data_segments(struct mlx5_ib_dev *dev,
949 struct mlx5_pagefault *pfault,
951 void *wqe_end, u32 *bytes_mapped,
952 u32 *total_wqe_bytes, bool receive_queue)
954 int ret = 0, npages = 0;
964 *total_wqe_bytes = 0;
966 while (wqe < wqe_end) {
967 struct mlx5_wqe_data_seg *dseg = wqe;
969 io_virt = be64_to_cpu(dseg->addr);
970 key = be32_to_cpu(dseg->lkey);
971 byte_count = be32_to_cpu(dseg->byte_count);
972 inline_segment = !!(byte_count & MLX5_INLINE_SEG);
973 bcnt = byte_count & ~MLX5_INLINE_SEG;
975 if (inline_segment) {
976 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
977 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
980 wqe += sizeof(*dseg);
983 /* Receive WQE: end of the SG list. */
984 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
988 if (!inline_segment && total_wqe_bytes) {
989 *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
990 pfault->bytes_committed);
993 /* A zero length data segment designates a length of 2GB. */
997 if (inline_segment || bcnt <= pfault->bytes_committed) {
998 pfault->bytes_committed -=
1000 pfault->bytes_committed);
1004 ret = pagefault_single_data_segment(dev, NULL, key,
1006 &pfault->bytes_committed,
1013 return ret < 0 ? ret : npages;
1017 * Parse initiator WQE. Advances the wqe pointer to point at the
1018 * scatter-gather list, and sets wqe_end to the end of the WQE. */
1020 static int mlx5_ib_mr_initiator_pfault_handler(
1021 struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
1022 struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
1024 struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
1025 u16 wqe_index = pfault->wqe.wqe_index;
1026 struct mlx5_base_av *av;
1027 unsigned ds, opcode;
1028 u32 qpn = qp->trans_qp.base.mqp.qpn;
1030 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
1031 if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
1032 mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
1038 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
1043 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
1044 *wqe += sizeof(*ctrl);
1046 opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
1047 MLX5_WQE_CTRL_OPCODE_MASK;
1049 if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
1050 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
1052 if (qp->ibqp.qp_type == IB_QPT_UD ||
1053 qp->qp_sub_type == MLX5_IB_QPT_DCI) {
1055 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
1056 *wqe += sizeof(struct mlx5_av);
1058 *wqe += sizeof(struct mlx5_base_av);
1062 case MLX5_OPCODE_RDMA_WRITE:
1063 case MLX5_OPCODE_RDMA_WRITE_IMM:
1064 case MLX5_OPCODE_RDMA_READ:
1065 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1067 case MLX5_OPCODE_ATOMIC_CS:
1068 case MLX5_OPCODE_ATOMIC_FA:
1069 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1070 *wqe += sizeof(struct mlx5_wqe_atomic_seg);
1078 * Parse responder WQE and set wqe_end to the end of the WQE.
1080 static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
1081 struct mlx5_ib_srq *srq,
1082 void **wqe, void **wqe_end,
1085 int wqe_size = 1 << srq->msrq.wqe_shift;
1087 if (wqe_size > wqe_length) {
1088 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1092 *wqe_end = *wqe + wqe_size;
1093 *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
1098 static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
1099 struct mlx5_ib_qp *qp,
1100 void *wqe, void **wqe_end,
1103 struct mlx5_ib_wq *wq = &qp->rq;
1104 int wqe_size = 1 << wq->wqe_shift;
1107 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
1111 if (wqe_size > wqe_length) {
1112 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1116 *wqe_end = wqe + wqe_size;
1121 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
1122 u32 wq_num, int pf_type)
1124 struct mlx5_core_rsc_common *common = NULL;
1125 struct mlx5_core_srq *srq;
1128 case MLX5_WQE_PF_TYPE_RMP:
1129 srq = mlx5_cmd_get_srq(dev, wq_num);
1131 common = &srq->common;
1133 case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
1134 case MLX5_WQE_PF_TYPE_RESP:
1135 case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
1136 common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
1145 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
1147 struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
1149 return to_mibqp(mqp);
1152 static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
1154 struct mlx5_core_srq *msrq =
1155 container_of(res, struct mlx5_core_srq, common);
1157 return to_mibsrq(msrq);
1160 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
1161 struct mlx5_pagefault *pfault)
1163 bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
1164 u16 wqe_index = pfault->wqe.wqe_index;
1165 void *wqe, *wqe_start = NULL, *wqe_end = NULL;
1166 u32 bytes_mapped, total_wqe_bytes;
1167 struct mlx5_core_rsc_common *res;
1168 int resume_with_error = 1;
1169 struct mlx5_ib_qp *qp;
1170 size_t bytes_copied;
1173 res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1175 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1179 if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
1180 res->res != MLX5_RES_XSRQ) {
1181 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
1183 goto resolve_page_fault;
1186 wqe_start = (void *)__get_free_page(GFP_KERNEL);
1188 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
1189 goto resolve_page_fault;
1193 qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
1195 ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
1199 ret = mlx5_ib_mr_initiator_pfault_handler(
1200 dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
1201 } else if (qp && !sq) {
1202 ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
1206 ret = mlx5_ib_mr_responder_pfault_handler_rq(
1207 dev, qp, wqe, &wqe_end, bytes_copied);
1209 struct mlx5_ib_srq *srq = res_to_srq(res);
1211 ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
1215 ret = mlx5_ib_mr_responder_pfault_handler_srq(
1216 dev, srq, &wqe, &wqe_end, bytes_copied);
1219 if (ret < 0 || wqe >= wqe_end)
1220 goto resolve_page_fault;
1222 ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
1223 &total_wqe_bytes, !sq);
1227 if (ret < 0 || total_wqe_bytes > bytes_mapped)
1228 goto resolve_page_fault;
1232 resume_with_error = 0;
1238 "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
1239 ret, wqe_index, pfault->token);
1242 mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1243 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
1244 pfault->wqe.wq_num, resume_with_error,
1246 mlx5_core_res_put(res);
1247 free_page((unsigned long)wqe_start);
1250 static int pages_in_range(u64 address, u32 length)
1252 return (ALIGN(address + length, PAGE_SIZE) -
1253 (address & PAGE_MASK)) >> PAGE_SHIFT;
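/*
 * Worked example (added): with 4K pages, address = 0x1100 and length = 0x2000
 * touch pages 0x1000, 0x2000 and 0x3000; ALIGN(0x3100, 4K) = 0x4000 and
 * (0x4000 - 0x1000) >> PAGE_SHIFT = 3 pages.
 */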
1256 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
1257 struct mlx5_pagefault *pfault)
1261 u32 prefetch_len = pfault->bytes_committed;
1262 int prefetch_activated = 0;
1263 u32 rkey = pfault->rdma.r_key;
1266 /* The RDMA responder handler handles the page fault in two parts.
1267 * First it brings the necessary pages for the current packet
1268 * (and uses the pfault context), and then (after resuming the QP)
1269 * prefetches more pages. The second operation cannot use the pfault
1270 * context and therefore uses the dummy_pfault context allocated on the stack. */
1272 pfault->rdma.rdma_va += pfault->bytes_committed;
1273 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1274 pfault->rdma.rdma_op_len);
1275 pfault->bytes_committed = 0;
1277 address = pfault->rdma.rdma_va;
1278 length = pfault->rdma.rdma_op_len;
1280 /* For some operations, the hardware cannot tell the exact message
1281 * length, and in those cases it reports zero. Use prefetch logic. */
1284 prefetch_activated = 1;
1285 length = pfault->rdma.packet_size;
1286 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
1289 ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
1290 &pfault->bytes_committed, NULL);
1291 if (ret == -EAGAIN) {
1292 /* We're racing with an invalidation, don't prefetch */
1293 prefetch_activated = 0;
1294 } else if (ret < 0 || pages_in_range(address, length) > ret) {
1295 mlx5_ib_page_fault_resume(dev, pfault, 1);
1297 mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
1298 ret, pfault->token, pfault->type);
1302 mlx5_ib_page_fault_resume(dev, pfault, 0);
1303 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
1304 pfault->token, pfault->type,
1305 prefetch_activated);
1307 /* At this point, there might be a new pagefault already arriving in
1308 * the eq; switch to the dummy pagefault for the rest of the
1309 * processing. We're still OK with the objects being alive as the
1310 * work-queue is being fenced. */
1312 if (prefetch_activated) {
1313 u32 bytes_committed = 0;
1315 ret = pagefault_single_data_segment(dev, NULL, rkey, address,
1317 &bytes_committed, NULL);
1318 if (ret < 0 && ret != -EAGAIN) {
1319 mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
1320 ret, pfault->token, address, prefetch_len);
1325 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1327 u8 event_subtype = pfault->event_subtype;
1329 switch (event_subtype) {
1330 case MLX5_PFAULT_SUBTYPE_WQE:
1331 mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1333 case MLX5_PFAULT_SUBTYPE_RDMA:
1334 mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1337 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
1339 mlx5_ib_page_fault_resume(dev, pfault, 1);
1343 static void mlx5_ib_eqe_pf_action(struct work_struct *work)
1345 struct mlx5_pagefault *pfault = container_of(work,
1346 struct mlx5_pagefault,
1348 struct mlx5_ib_pf_eq *eq = pfault->eq;
1350 mlx5_ib_pfault(eq->dev, pfault);
1351 mempool_free(pfault, eq->pool);
1354 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
1356 struct mlx5_eqe_page_fault *pf_eqe;
1357 struct mlx5_pagefault *pfault;
1358 struct mlx5_eqe *eqe;
1361 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
1362 pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
1364 schedule_work(&eq->work);
1368 pf_eqe = &eqe->data.page_fault;
1369 pfault->event_subtype = eqe->sub_type;
1370 pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
1372 mlx5_ib_dbg(eq->dev,
1373 "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
1374 eqe->sub_type, pfault->bytes_committed);
1376 switch (eqe->sub_type) {
1377 case MLX5_PFAULT_SUBTYPE_RDMA:
1378 /* RDMA based event */
1380 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
1382 be32_to_cpu(pf_eqe->rdma.pftype_token) &
1384 pfault->rdma.r_key =
1385 be32_to_cpu(pf_eqe->rdma.r_key);
1386 pfault->rdma.packet_size =
1387 be16_to_cpu(pf_eqe->rdma.packet_length);
1388 pfault->rdma.rdma_op_len =
1389 be32_to_cpu(pf_eqe->rdma.rdma_op_len);
1390 pfault->rdma.rdma_va =
1391 be64_to_cpu(pf_eqe->rdma.rdma_va);
1392 mlx5_ib_dbg(eq->dev,
1393 "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
1394 pfault->type, pfault->token,
1395 pfault->rdma.r_key);
1396 mlx5_ib_dbg(eq->dev,
1397 "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
1398 pfault->rdma.rdma_op_len,
1399 pfault->rdma.rdma_va);
1402 case MLX5_PFAULT_SUBTYPE_WQE:
1403 /* WQE based event */
1405 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1407 be32_to_cpu(pf_eqe->wqe.token);
1408 pfault->wqe.wq_num =
1409 be32_to_cpu(pf_eqe->wqe.pftype_wq) &
1411 pfault->wqe.wqe_index =
1412 be16_to_cpu(pf_eqe->wqe.wqe_index);
1413 pfault->wqe.packet_size =
1414 be16_to_cpu(pf_eqe->wqe.packet_length);
1415 mlx5_ib_dbg(eq->dev,
1416 "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
1417 pfault->type, pfault->token,
1419 pfault->wqe.wqe_index);
1423 mlx5_ib_warn(eq->dev,
1424 "Unsupported page fault event sub-type: 0x%02hhx\n",
1426 /* Unsupported page faults should still be
1427 * resolved by the page fault handler
1432 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
1433 queue_work(eq->wq, &pfault->work);
1435 cc = mlx5_eq_update_cc(eq->core, ++cc);
1438 mlx5_eq_update_ci(eq->core, cc, 1);
1441 static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
1444 struct mlx5_ib_pf_eq *eq =
1445 container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
1446 unsigned long flags;
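/*
 * Note (added): this runs from the EQ notifier in atomic context. If another
 * context already holds eq->lock, do not spin here; defer draining the EQ to
 * eq->work instead.
 */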
1448 if (spin_trylock_irqsave(&eq->lock, flags)) {
1449 mlx5_ib_eq_pf_process(eq);
1450 spin_unlock_irqrestore(&eq->lock, flags);
1452 schedule_work(&eq->work);
1458 /* mempool_refill() was proposed but unfortunately wasn't accepted
1459 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
1462 static void mempool_refill(mempool_t *pool)
1464 while (pool->curr_nr < pool->min_nr)
1465 mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
1468 static void mlx5_ib_eq_pf_action(struct work_struct *work)
1470 struct mlx5_ib_pf_eq *eq =
1471 container_of(work, struct mlx5_ib_pf_eq, work);
1473 mempool_refill(eq->pool);
1475 spin_lock_irq(&eq->lock);
1476 mlx5_ib_eq_pf_process(eq);
1477 spin_unlock_irq(&eq->lock);
1481 MLX5_IB_NUM_PF_EQE = 0x1000,
1482 MLX5_IB_NUM_PF_DRAIN = 64,
1486 mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1488 struct mlx5_eq_param param = {};
1491 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
1492 spin_lock_init(&eq->lock);
1495 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
1496 sizeof(struct mlx5_pagefault));
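/*
 * Note (added): the pool keeps MLX5_IB_NUM_PF_DRAIN pagefault descriptors
 * pre-allocated so that mlx5_ib_eq_pf_process() can usually allocate with
 * GFP_ATOMIC; when that still fails, processing falls back to eq->work, which
 * refills the pool from process context via mempool_refill().
 */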
1500 eq->wq = alloc_workqueue("mlx5_ib_page_fault",
1501 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
1508 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
1509 param = (struct mlx5_eq_param) {
1511 .nent = MLX5_IB_NUM_PF_EQE,
1513 param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
1514 eq->core = mlx5_eq_create_generic(dev->mdev, ¶m);
1515 if (IS_ERR(eq->core)) {
1516 err = PTR_ERR(eq->core);
1519 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
1521 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
1527 mlx5_eq_destroy_generic(dev->mdev, eq->core);
1529 destroy_workqueue(eq->wq);
1531 mempool_destroy(eq->pool);
1536 mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1540 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
1541 err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
1542 cancel_work_sync(&eq->work);
1543 destroy_workqueue(eq->wq);
1544 mempool_destroy(eq->pool);
1549 void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
1551 if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1554 switch (ent->order - 2) {
1555 case MLX5_IMR_MTT_CACHE_ENTRY:
1556 ent->page = PAGE_SHIFT;
1557 ent->xlt = MLX5_IMR_MTT_ENTRIES *
1558 sizeof(struct mlx5_mtt) /
1559 MLX5_IB_UMR_OCTOWORD;
1560 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
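/*
 * Illustrative sizing (added; assumes 4K pages, 8-byte MTTs, 16-byte
 * octowords): each child MR needs MLX5_IMR_MTT_ENTRIES = 2^18 MTTs, i.e.
 * 2^18 * 8 / 16 = 2^17 XLT octowords per cache entry.
 */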
1564 case MLX5_IMR_KSM_CACHE_ENTRY:
1565 ent->page = MLX5_KSM_PAGE_SHIFT;
1566 ent->xlt = mlx5_imr_ksm_entries *
1567 sizeof(struct mlx5_klm) /
1568 MLX5_IB_UMR_OCTOWORD;
1569 ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
1575 static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
1576 .advise_mr = mlx5_ib_advise_mr,
1577 .invalidate_range = mlx5_ib_invalidate_range,
1580 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1584 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1587 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1589 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1590 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
1592 mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
1597 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1602 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1604 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1607 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
1610 int mlx5_ib_odp_init(void)
1612 mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) - MLX5_IMR_MTT_BITS);
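/*
 * Example (added): on x86-64 with a 47-bit user address space and 4K pages,
 * get_order(TASK_SIZE) is 35 and MLX5_IMR_MTT_BITS is 18, giving 2^17 KSM
 * entries of 1GB each, enough to cover the full 128TB of user VA.
 */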
1618 struct prefetch_mr_work {
1619 struct work_struct work;
1624 struct mlx5_ib_mr *mr;
1629 static void destroy_prefetch_work(struct prefetch_mr_work *work)
1633 for (i = 0; i < work->num_sge; ++i)
1634 atomic_dec(&work->frags[i].mr->num_deferred_work);
1638 static struct mlx5_ib_mr *
1639 get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
1642 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1643 struct mlx5_core_mkey *mmkey;
1644 struct ib_umem_odp *odp;
1645 struct mlx5_ib_mr *mr;
1647 lockdep_assert_held(&dev->odp_srcu);
1649 mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
1650 if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
1653 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
1655 if (mr->ibmr.pd != pd)
1659 * Implicit child MRs are internal and userspace should not refer to them. */
1665 odp = to_ib_umem_odp(mr->umem);
1667 /* prefetch with write-access must be supported by the MR */
1668 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
1669 !odp->umem.writable)
1675 static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
1677 struct prefetch_mr_work *work =
1678 container_of(w, struct prefetch_mr_work, work);
1679 u32 bytes_mapped = 0;
1682 for (i = 0; i < work->num_sge; ++i)
1683 pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
1684 work->frags[i].length, &bytes_mapped,
1687 destroy_prefetch_work(work);
1690 static bool init_prefetch_work(struct ib_pd *pd,
1691 enum ib_uverbs_advise_mr_advice advice,
1692 u32 pf_flags, struct prefetch_mr_work *work,
1693 struct ib_sge *sg_list, u32 num_sge)
1697 INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
1698 work->pf_flags = pf_flags;
1700 for (i = 0; i < num_sge; ++i) {
1701 work->frags[i].io_virt = sg_list[i].addr;
1702 work->frags[i].length = sg_list[i].length;
1704 get_prefetchable_mr(pd, advice, sg_list[i].lkey);
1705 if (!work->frags[i].mr) {
1706 work->num_sge = i;
1708 destroy_prefetch_work(work);
1712 /* Keep the MR pointer valid outside the SRCU */
1713 atomic_inc(&work->frags[i].mr->num_deferred_work);
1715 work->num_sge = num_sge;
1719 static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
1720 enum ib_uverbs_advise_mr_advice advice,
1721 u32 pf_flags, struct ib_sge *sg_list,
1724 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1725 u32 bytes_mapped = 0;
1730 srcu_key = srcu_read_lock(&dev->odp_srcu);
1731 for (i = 0; i < num_sge; ++i) {
1732 struct mlx5_ib_mr *mr;
1734 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
1739 ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
1740 &bytes_mapped, pf_flags);
1747 srcu_read_unlock(&dev->odp_srcu, srcu_key);
1751 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1752 enum ib_uverbs_advise_mr_advice advice,
1753 u32 flags, struct ib_sge *sg_list, u32 num_sge)
1755 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1757 struct prefetch_mr_work *work;
1760 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
1761 pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
1763 if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
1764 return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
1767 work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
1771 srcu_key = srcu_read_lock(&dev->odp_srcu);
1772 if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
1773 srcu_read_unlock(&dev->odp_srcu, srcu_key);
1776 queue_work(system_unbound_wq, &work->work);
1777 srcu_read_unlock(&dev->odp_srcu, srcu_key);