/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>

#include "mlx5_ib.h"
#include "cmd.h"
#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
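
/*
 * Worked example of the geometry above: MLX5_IMR_MTT_SHIFT is always 30,
 * so one implicit-MR leaf covers 2^30 bytes = 1 GiB of virtual address
 * space regardless of the CPU page size.  With 4 KiB pages
 * (PAGE_SHIFT == 12), MLX5_IMR_MTT_BITS is 18 and a leaf holds
 * 2^18 = 262144 page-sized MTT entries.  The parent KSM table treats
 * each 1 GiB leaf as a single "page", hence MLX5_KSM_PAGE_SHIFT.
 */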
static u64 mlx5_imr_ksm_entries;

static int check_parent(struct ib_umem_odp *odp,
			struct mlx5_ib_mr *parent)
{
	struct mlx5_ib_mr *mr = odp->private;

	return mr && mr->parent == parent && !odp->dying;
}

struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
{
	if (WARN_ON(!mr || !mr->umem || !mr->umem->is_odp))
		return NULL;

	return to_ib_umem_odp(mr->umem)->per_mm;
}

static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
{
	struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
	struct ib_ucontext_per_mm *per_mm = odp->per_mm;
	struct rb_node *rb;

	down_read(&per_mm->umem_rwsem);
	while (1) {
		rb = rb_next(&odp->interval_tree.rb);
		if (!rb)
			goto not_found;
		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
		if (check_parent(odp, parent))
			goto end;
	}
not_found:
	odp = NULL;
end:
	up_read(&per_mm->umem_rwsem);
	return odp;
}

static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
				      struct mlx5_ib_mr *parent)
{
	struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
	struct ib_umem_odp *odp;
	struct rb_node *rb;

	down_read(&per_mm->umem_rwsem);
	odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
	if (!odp)
		goto end;

	while (1) {
		if (check_parent(odp, parent))
			goto end;
		rb = rb_next(&odp->interval_tree.rb);
		if (!rb)
			goto not_found;
		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
		if (ib_umem_start(&odp->umem) > start + length)
			goto not_found;
	}
not_found:
	odp = NULL;
end:
	up_read(&per_mm->umem_rwsem);
	return odp;
}
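
/*
 * Fill 'nentries' KLM descriptors of an implicit MR's KSM table, starting
 * at index 'offset'.  With MLX5_IB_UPD_XLT_ZAP every descriptor points at
 * the device's null mkey so any access faults again; otherwise each 1 GiB
 * slot backed by a child MTT MR gets that child's lkey, and uncovered
 * slots fall back to the null mkey.
 */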
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags)
{
	struct ib_pd *pd = mr->ibmr.pd;
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_odp *odp;
	unsigned long va;
	int i;

	if (flags & MLX5_IB_UPD_XLT_ZAP) {
		for (i = 0; i < nentries; i++, pklm++) {
			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = cpu_to_be32(dev->null_mkey);
			pklm->va = 0;
		}
		return;
	}

	odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
			 nentries * MLX5_IMR_MTT_SIZE, mr);

	for (i = 0; i < nentries; i++, pklm++) {
		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
		va = (offset + i) * MLX5_IMR_MTT_SIZE;
		if (odp && odp->umem.address == va) {
			struct mlx5_ib_mr *mtt = odp->private;

			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
			odp = odp_next(odp);
		} else {
			pklm->key = cpu_to_be32(dev->null_mkey);
		}
		mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
			    i, va, be32_to_cpu(pklm->key));
	}
}
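
/*
 * Deferred teardown of one implicit-MR leaf, run from a workqueue once
 * the leaf has been marked dying.  synchronize_srcu() ensures no
 * page-fault handler still references the MR; the umem is then released,
 * the parent's KSM entry for this 1 GiB slot is zapped back to the null
 * mkey, the MR returns to the cache, and any waiter in
 * mlx5_ib_free_implicit_mr() is woken.
 */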
static void mr_leaf_free_action(struct work_struct *work)
{
	struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
	int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT;
	struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;

	mr->parent = NULL;
	synchronize_srcu(&mr->dev->mr_srcu);

	ib_umem_release(&odp->umem);
	if (imr->live)
		mlx5_ib_update_xlt(imr, idx, 1, 0,
				   MLX5_IB_UPD_XLT_INDIRECT |
				   MLX5_IB_UPD_XLT_ATOMIC);
	mlx5_mr_cache_free(mr->dev, mr);

	if (atomic_dec_and_test(&imr->num_leaf_free))
		wake_up(&imr->q_leaf_free);
}
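
/*
 * MMU-notifier callback: part of the userspace mapping is going away.
 * Zap the matching MTTs so the HCA faults instead of using stale
 * translations, then DMA-unmap the pages.  The zapping is batched into
 * UMR-block-aligned chunks (umr_block_mask) to amortize the cost of the
 * UMR operations.  If this empties a leaf of an implicit MR, the leaf is
 * marked dying and queued for mr_leaf_free_action().
 */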
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
			      unsigned long end)
{
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
				    sizeof(struct mlx5_mtt)) - 1;
	u64 idx = 0, blk_start_idx = 0;
	struct ib_umem *umem;
	int in_block = 0;
	u64 addr;

	if (!umem_odp) {
		pr_err("invalidation called on NULL umem or non-ODP umem\n");
		return;
	}
	umem = &umem_odp->umem;

	mr = umem_odp->private;

	if (!mr || !mr->ibmr.pd)
		return;

	start = max_t(u64, ib_umem_start(umem), start);
	end = min_t(u64, ib_umem_end(umem), end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs. Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */
	for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of a bigger
		 * UMR.
		 */
		if (umem_odp->dma_list[idx] &
		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
			if (!in_block) {
				blk_start_idx = idx;
				in_block = 1;
			}
		} else {
			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5_ib_update_xlt(mr, blk_start_idx,
						   idx - blk_start_idx, 0,
						   MLX5_IB_UPD_XLT_ZAP |
						   MLX5_IB_UPD_XLT_ATOMIC);
				in_block = 0;
			}
		}
	}
	if (in_block)
		mlx5_ib_update_xlt(mr, blk_start_idx,
				   idx - blk_start_idx + 1, 0,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ATOMIC);
	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

	if (unlikely(!umem->npages && mr->parent &&
		     !umem_odp->dying)) {
		WRITE_ONCE(umem_odp->dying, 1);
		atomic_inc(&mr->parent->num_leaf_free);
		schedule_work(&umem_odp->work);
	}
}
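
/*
 * Translate firmware capability bits into the ib_odp_caps that the IB
 * core reports to userspace.  Implicit ODP is only advertised when the
 * device also has fixed_buffer_size, null_mkey and
 * umr_extended_translation_offset, all of which the implicit-MR code in
 * this file relies on.
 */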
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg))
		return;

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		dev->odp_max_size = U64_MAX;
	else
		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;
	int ret = mlx5_core_page_fault_resume(dev->mdev,
					      pfault->token,
					      wq_num,
					      pfault->type,
					      error);
	if (ret)
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
			    wq_num);
}

static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
					    struct ib_umem *umem,
					    bool ksm, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err;

	mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
					    MLX5_IMR_MTT_CACHE_ENTRY);
	if (IS_ERR(mr))
		return mr;

	mr->ibmr.pd = pd;

	mr->dev = dev;
	mr->access_flags = access_flags;
	mr->mmkey.iova = 0;
	mr->umem = umem;

	if (ksm) {
		err = mlx5_ib_update_xlt(mr, 0,
					 mlx5_imr_ksm_entries,
					 MLX5_KSM_PAGE_SHIFT,
					 MLX5_IB_UPD_XLT_INDIRECT |
					 MLX5_IB_UPD_XLT_ZAP |
					 MLX5_IB_UPD_XLT_ENABLE);
	} else {
		err = mlx5_ib_update_xlt(mr, 0,
					 MLX5_IMR_MTT_ENTRIES,
					 PAGE_SHIFT,
					 MLX5_IB_UPD_XLT_ZAP |
					 MLX5_IB_UPD_XLT_ENABLE |
					 MLX5_IB_UPD_XLT_ATOMIC);
	}

	if (err)
		goto fail;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	mr->live = 1;

	mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
		    mr->mmkey.key, dev->mdev, mr);

	return mr;

fail:
	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
	mlx5_mr_cache_free(dev, mr);

	return ERR_PTR(err);
}
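
/*
 * Make sure a 1 GiB leaf MR exists for every address in [io_virt,
 * io_virt + bcnt).  Walks the interval tree under the parent's
 * umem_mutex, allocates a leaf umem plus MTT MR for each uncovered slot,
 * and then publishes all new children into the parent's KSM table with a
 * single XLT update.  Returns the leaf covering io_virt; the caller
 * iterates further with odp_next().
 */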
static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
						u64 io_virt, size_t bcnt)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
	struct ib_umem_odp *odp, *result = NULL;
	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
	u64 addr = io_virt & MLX5_IMR_MTT_MASK;
	int nentries = 0, start_idx = 0, ret;
	struct mlx5_ib_mr *mtt;

	mutex_lock(&odp_mr->umem_mutex);
	odp = odp_lookup(addr, 1, mr);

	mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
		    io_virt, bcnt, addr, odp);

next_mr:
	if (likely(odp)) {
		if (nentries)
			nentries++;
	} else {
		odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
					MLX5_IMR_MTT_SIZE);
		if (IS_ERR(odp)) {
			mutex_unlock(&odp_mr->umem_mutex);
			return ERR_CAST(odp);
		}

		mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
					mr->access_flags);
		if (IS_ERR(mtt)) {
			mutex_unlock(&odp_mr->umem_mutex);
			ib_umem_release(&odp->umem);
			return ERR_CAST(mtt);
		}

		odp->private = mtt;
		mtt->umem = &odp->umem;
		mtt->mmkey.iova = addr;
		mtt->parent = mr;
		INIT_WORK(&odp->work, mr_leaf_free_action);

		if (!nentries)
			start_idx = addr >> MLX5_IMR_MTT_SHIFT;
		nentries++;
	}

	/* Return the first odp if the region is not covered by a single one */
	if (likely(!result))
		result = odp;

	addr += MLX5_IMR_MTT_SIZE;
	if (unlikely(addr < io_virt + bcnt)) {
		odp = odp_next(odp);
		if (odp && odp->umem.address != addr)
			odp = NULL;
		goto next_mr;
	}

	if (unlikely(nentries)) {
		ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
					 MLX5_IB_UPD_XLT_INDIRECT |
					 MLX5_IB_UPD_XLT_ATOMIC);
		if (ret) {
			mlx5_ib_err(dev, "Failed to update PAS\n");
			result = ERR_PTR(ret);
		}
	}

	mutex_unlock(&odp_mr->umem_mutex);
	return result;
}
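
/*
 * An implicit ODP MR covers the whole process address space.  The umem
 * created below is a zero-length placeholder; the mkey is an indirect
 * KSM key whose entries are only populated on first fault through
 * implicit_mr_get_data() above.
 */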
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags)
{
	struct ib_ucontext *ctx = pd->ibpd.uobject->context;
	struct mlx5_ib_mr *imr;
	struct ib_umem *umem;

	umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
	if (IS_ERR(imr)) {
		ib_umem_release(umem);
		return ERR_CAST(imr);
	}

	imr->umem = umem;
	init_waitqueue_head(&imr->q_leaf_free);
	atomic_set(&imr->num_leaf_free, 0);

	return imr;
}

static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
			void *cookie)
{
	struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
	struct ib_umem *umem = &umem_odp->umem;

	if (mr->parent != imr)
		return 0;

	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
				    ib_umem_end(umem));

	if (umem_odp->dying)
		return 0;

	WRITE_ONCE(umem_odp->dying, 1);
	atomic_inc(&imr->num_leaf_free);
	schedule_work(&umem_odp->work);

	return 0;
}

void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
	struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);

	down_read(&per_mm->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
				      mr_leaf_free, true, imr);
	up_read(&per_mm->umem_rwsem);

	wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
}
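
/*
 * Core per-MR fault path.  This follows the usual mmu-notifier retry
 * protocol:
 *   1. sample odp->notifiers_seq (the smp_rmb() orders this read before
 *      the page-table walk),
 *   2. ib_umem_odp_map_dma_pages() pins and DMA-maps the pages,
 *   3. under umem_mutex, ib_umem_mmu_notifier_retry() re-checks the
 *      sequence: only if no invalidation ran in between are the MTTs
 *      written; otherwise -EAGAIN is returned and the fault is retried.
 * For implicit MRs the 'next_mr' loop advances leaf by leaf via
 * odp_next().
 */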
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
	int npages = 0, current_seq, page_shift, ret, np;
	bool implicit = false;
	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
	u64 access_mask = ODP_READ_ALLOWED_BIT;
	u64 start_idx, page_mask;
	struct ib_umem_odp *odp;
	size_t size;

	if (!odp_mr->page_list) {
		odp = implicit_mr_get_data(mr, io_virt, bcnt);

		if (IS_ERR(odp))
			return PTR_ERR(odp);
		mr = odp->private;
		implicit = true;
	} else {
		odp = odp_mr;
	}

next_mr:
	size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt);

	page_shift = mr->umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;

	if (mr->umem->writable)
		access_mask |= ODP_WRITE_ALLOWED_BIT;

	current_seq = READ_ONCE(odp->notifiers_seq);
	/*
	 * Ensure the sequence number is valid for some time before we call
	 * gup.
	 */
	smp_rmb();

	ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
					access_mask, current_seq);

	if (ret < 0)
		goto out;

	np = ret;

	mutex_lock(&odp->umem_mutex);
	if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
					current_seq)) {
		/*
		 * No need to check whether the MTTs really belong to
		 * this MR, since ib_umem_odp_map_dma_pages already
		 * checks this.
		 */
		ret = mlx5_ib_update_xlt(mr, start_idx, np,
					 page_shift, MLX5_IB_UPD_XLT_ATOMIC);
	} else {
		ret = -EAGAIN;
	}
	mutex_unlock(&odp->umem_mutex);

	if (ret < 0) {
		if (ret != -EAGAIN)
			mlx5_ib_err(dev, "Failed to update mkey page tables\n");
		goto out;
	}

	if (bytes_mapped) {
		u32 new_mappings = (np << page_shift) -
			(io_virt - round_down(io_virt, 1 << page_shift));
		*bytes_mapped += min_t(u32, new_mappings, size);
	}

	npages += np << (page_shift - PAGE_SHIFT);
	bcnt -= size;

	if (unlikely(bcnt)) {
		struct ib_umem_odp *next;

		io_virt += size;
		next = odp_next(odp);
		if (unlikely(!next || next->umem.address != io_virt)) {
			mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
				    io_virt, next);
			return -EAGAIN;
		}
		odp = next;
		mr = odp->private;
		goto next_mr;
	}

	return npages;

out:
	if (ret == -EAGAIN) {
		if (implicit || !odp->dying) {
			unsigned long timeout =
				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

			if (!wait_for_completion_timeout(
					&odp->notifier_completion,
					timeout)) {
				mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d\n",
					     current_seq, odp->notifiers_seq);
			}
		} else {
			/* The MR is being killed, kill the QP as well. */
			ret = -EFAULT;
		}
	}

	return ret;
}

struct pf_frame {
	struct pf_frame *next;
	u32 key;
	u64 io_virt;
	size_t bcnt;
	int depth;
};
/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of OS pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
					 u32 key, u64 io_virt, size_t bcnt,
					 u32 *bytes_committed,
					 u32 *bytes_mapped)
{
	int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
	struct pf_frame *head = NULL, *frame;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mw *mw;
	struct mlx5_ib_mr *mr;
	struct mlx5_klm *pklm;
	u32 *out = NULL;
	size_t offset;

	srcu_key = srcu_read_lock(&dev->mr_srcu);

	io_virt += *bytes_committed;
	bcnt -= *bytes_committed;

next_mr:
	mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
	if (!mmkey || mmkey->key != key) {
		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
		ret = -EFAULT;
		goto srcu_unlock;
	}

	switch (mmkey->type) {
	case MLX5_MKEY_MR:
		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
		if (!mr->live || !mr->ibmr.pd) {
			mlx5_ib_dbg(dev, "got dead MR\n");
			ret = -EFAULT;
			goto srcu_unlock;
		}

		if (!mr->umem->is_odp) {
			mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
				    key);
			if (bytes_mapped)
				*bytes_mapped += bcnt;
			ret = 0;
			goto srcu_unlock;
		}

		ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
		if (ret < 0)
			goto srcu_unlock;

		npages += ret;
		ret = 0;
		break;

	case MLX5_MKEY_MW:
		mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);

		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
			mlx5_ib_dbg(dev, "indirection level exceeded\n");
			ret = -EFAULT;
			goto srcu_unlock;
		}

		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
			sizeof(*pklm) * (mw->ndescs - 2);

		if (outlen > cur_outlen) {
			kfree(out);
			out = kzalloc(outlen, GFP_KERNEL);
			if (!out) {
				ret = -ENOMEM;
				goto srcu_unlock;
			}
			cur_outlen = outlen;
		}

		pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
						       bsf0_klm0_pas_mtt0_1);

		ret = mlx5_core_query_mkey(dev->mdev, &mw->mmkey, out, outlen);
		if (ret)
			goto srcu_unlock;

		offset = io_virt - MLX5_GET64(query_mkey_out, out,
					      memory_key_mkey_entry.start_addr);

		for (i = 0; bcnt && i < mw->ndescs; i++, pklm++) {
			if (offset >= be32_to_cpu(pklm->bcount)) {
				offset -= be32_to_cpu(pklm->bcount);
				continue;
			}

			frame = kzalloc(sizeof(*frame), GFP_KERNEL);
			if (!frame) {
				ret = -ENOMEM;
				goto srcu_unlock;
			}

			frame->key = be32_to_cpu(pklm->key);
			frame->io_virt = be64_to_cpu(pklm->va) + offset;
			frame->bcnt = min_t(size_t, bcnt,
					    be32_to_cpu(pklm->bcount) - offset);
			frame->depth = depth + 1;
			frame->next = head;
			head = frame;

			bcnt -= frame->bcnt;
			offset = 0;
		}
		break;

	default:
		mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
		ret = -EFAULT;
		goto srcu_unlock;
	}

	if (head) {
		frame = head;
		head = frame->next;

		key = frame->key;
		io_virt = frame->io_virt;
		bcnt = frame->bcnt;
		depth = frame->depth;
		kfree(frame);

		goto next_mr;
	}

srcu_unlock:
	while (head) {
		frame = head;
		head = frame->next;
		kfree(frame);
	}
	kfree(out);

	srcu_read_unlock(&dev->mr_srcu, srcu_key);
	*bytes_committed = 0;
	return ret ? ret : npages;
}
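
/*
 * Note how indirect mkeys (memory windows) are resolved above without
 * recursion: each KLM of a MW is pushed as a pf_frame and popped by the
 * 'next_mr' loop, with frame->depth bounded by the device's
 * max_indirection capability.
 */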
/*
 * Parse a series of data segments for page fault handling.
 *
 * @qp the QP on which the fault occurred.
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               packet).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
				   struct mlx5_pagefault *pfault,
				   struct mlx5_ib_qp *qp, void *wqe,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, int receive_queue)
{
	int ret = 0, npages = 0;
	u64 io_virt;
	u32 key;
	u32 byte_count;
	size_t bcnt;
	int inline_segment;

	/* Skip SRQ next-WQE segment. */
	if (receive_queue && qp->ibqp.srq)
		wqe += sizeof(struct mlx5_wqe_srq_next_seg);

	if (bytes_mapped)
		*bytes_mapped = 0;
	if (total_wqe_bytes)
		*total_wqe_bytes = 0;

	while (wqe < wqe_end) {
		struct mlx5_wqe_data_seg *dseg = wqe;

		io_virt = be64_to_cpu(dseg->addr);
		key = be32_to_cpu(dseg->lkey);
		byte_count = be32_to_cpu(dseg->byte_count);
		inline_segment = !!(byte_count & MLX5_INLINE_SEG);
		bcnt = byte_count & ~MLX5_INLINE_SEG;

		if (inline_segment) {
			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
				     16);
		} else {
			wqe += sizeof(*dseg);
		}

		/* Receive WQE: end of the SG list. */
		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
		    io_virt == 0)
			break;

		if (!inline_segment && total_wqe_bytes) {
			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
					pfault->bytes_committed);
		}

		/* A zero length data segment designates a length of 2GB. */
		if (bcnt == 0)
			bcnt = 1U << 31;

		if (inline_segment || bcnt <= pfault->bytes_committed) {
			pfault->bytes_committed -=
				min_t(size_t, bcnt,
				      pfault->bytes_committed);
			continue;
		}

		ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
						    &pfault->bytes_committed,
						    bytes_mapped);
		if (ret < 0)
			break;
		npages += ret;
	}

	return ret < 0 ? ret : npages;
}
static const u32 mlx5_ib_odp_opcode_cap[] = {
	[MLX5_OPCODE_SEND]             = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_IMM]         = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_READ]        = IB_ODP_SUPPORT_READ,
	[MLX5_OPCODE_ATOMIC_CS]        = IB_ODP_SUPPORT_ATOMIC,
	[MLX5_OPCODE_ATOMIC_FA]        = IB_ODP_SUPPORT_ATOMIC,
};
/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
	u16 wqe_index = pfault->wqe.wqe_index;
	u32 transport_caps;
	struct mlx5_base_av *av;
	unsigned ds, opcode;
#if defined(DEBUG)
	u32 ctrl_wqe_index, ctrl_qpn;
#endif
	u32 qpn = qp->trans_qp.base.mqp.qpn;

	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
			    ds, wqe_length);
		return -EFAULT;
	}

	if (ds == 0) {
		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
			    wqe_index, qpn);
		return -EFAULT;
	}

#if defined(DEBUG)
	ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
			  MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
			 MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
	if (wqe_index != ctrl_wqe_index) {
		mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
			    wqe_index, qpn,
			    ctrl_wqe_index);
		return -EFAULT;
	}

	ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
		   MLX5_WQE_CTRL_QPN_SHIFT;
	if (qpn != ctrl_qpn) {
		mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
			    wqe_index, qpn,
			    ctrl_qpn);
		return -EFAULT;
	}
#endif /* DEBUG */

	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
	*wqe += sizeof(*ctrl);

	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
		 MLX5_WQE_CTRL_OPCODE_MASK;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
		break;
	case IB_QPT_UD:
		transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
		break;
	default:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
			    qp->ibqp.qp_type);
		return -EFAULT;
	}

	if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
		     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
			    opcode);
		return -EFAULT;
	}

	if (qp->ibqp.qp_type != IB_QPT_RC) {
		av = *wqe;
		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
			*wqe += sizeof(struct mlx5_av);
		else
			*wqe += sizeof(struct mlx5_base_av);
	}

	switch (opcode) {
	case MLX5_OPCODE_RDMA_WRITE:
	case MLX5_OPCODE_RDMA_WRITE_IMM:
	case MLX5_OPCODE_RDMA_READ:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
	case MLX5_OPCODE_ATOMIC_FA:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		*wqe += sizeof(struct mlx5_wqe_atomic_seg);
		break;
	}

	return 0;
}
/*
 * Parse responder WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_ib_wq *wq = &qp->rq;
	int wqe_size = 1 << wq->wqe_shift;

	if (qp->ibqp.srq) {
		mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
		return -EFAULT;
	}

	if (qp->wq_sig) {
		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
		return -EFAULT;
	}

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
		return -EFAULT;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
		      IB_ODP_SUPPORT_RECV))
			goto invalid_transport_or_opcode;
		break;
	default:
invalid_transport_or_opcode:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EFAULT;
	}

	*wqe_end = *wqe + wqe_size;

	return 0;
}
static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
					      u32 wq_num)
{
	struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);

	if (!mqp) {
		mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
		return NULL;
	}

	return to_mibqp(mqp);
}
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
{
	int ret;
	void *wqe, *wqe_end;
	u32 bytes_mapped, total_wqe_bytes;
	char *buffer = NULL;
	int resume_with_error = 1;
	u16 wqe_index = pfault->wqe.wqe_index;
	int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
	struct mlx5_ib_qp *qp;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		goto resolve_page_fault;
	}

	qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
	if (!qp)
		goto resolve_page_fault;

	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
				    PAGE_SIZE, &qp->trans_qp.base);
	if (ret < 0) {
		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
			    ret, wqe_index, pfault->token);
		goto resolve_page_fault;
	}

	wqe = buffer;
	if (requestor)
		ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	else
		ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	if (ret < 0)
		goto resolve_page_fault;

	if (wqe >= wqe_end) {
		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
		goto resolve_page_fault;
	}

	ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
				      &bytes_mapped, &total_wqe_bytes,
				      !requestor);
	if (ret == -EAGAIN) {
		resume_with_error = 0;
		goto resolve_page_fault;
	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
		goto resolve_page_fault;
	}

	resume_with_error = 0;
resolve_page_fault:
	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
		    pfault->wqe.wq_num, resume_with_error,
		    pfault->type);
	free_page((unsigned long)buffer);
}
static int pages_in_range(u64 address, u32 length)
{
	return (ALIGN(address + length, PAGE_SIZE) -
		(address & PAGE_MASK)) >> PAGE_SHIFT;
}
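
/*
 * pages_in_range() above counts whole pages touched by [address,
 * address + length).  E.g. with 4 KiB pages, address = 0x1f00 and
 * length = 0x300 span pages 0x1000 and 0x2000, and indeed
 * (ALIGN(0x2200, 0x1000) - 0x1000) >> 12 == 2.
 */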
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
					   struct mlx5_pagefault *pfault)
{
	u64 address;
	u32 length;
	u32 prefetch_len = pfault->bytes_committed;
	int prefetch_activated = 0;
	u32 rkey = pfault->rdma.r_key;
	int ret;

	/* The RDMA responder handler handles the page fault in two parts.
	 * First it brings the necessary pages for the current packet
	 * (and uses the pfault context), and then (after resuming the QP)
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack. */
	pfault->rdma.rdma_va += pfault->bytes_committed;
	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
					pfault->rdma.rdma_op_len);
	pfault->bytes_committed = 0;

	address = pfault->rdma.rdma_va;
	length  = pfault->rdma.rdma_op_len;

	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic. */
	if (length == 0) {
		prefetch_activated = 1;
		length = pfault->rdma.packet_size;
		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
	}

	ret = pagefault_single_data_segment(dev, rkey, address, length,
					    &pfault->bytes_committed, NULL);
	if (ret == -EAGAIN) {
		/* We're racing with an invalidation, don't prefetch */
		prefetch_activated = 0;
	} else if (ret < 0 || pages_in_range(address, length) > ret) {
		mlx5_ib_page_fault_resume(dev, pfault, 1);
		if (ret != -ENOENT)
			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
				    ret, pfault->token, pfault->type);
		return;
	}

	mlx5_ib_page_fault_resume(dev, pfault, 0);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
		    pfault->token, pfault->type,
		    prefetch_activated);

	/* At this point, there might be a new pagefault already arriving in
	 * the eq, switch to the dummy pagefault for the rest of the
	 * processing. We're still OK with the objects being alive as the
	 * work-queue is being fenced. */

	if (prefetch_activated) {
		u32 bytes_committed = 0;

		ret = pagefault_single_data_segment(dev, rkey, address,
						    prefetch_len,
						    &bytes_committed, NULL);
		if (ret < 0 && ret != -EAGAIN) {
			mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
				    ret, pfault->token, address, prefetch_len);
		}
	}
}
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault)
{
	struct mlx5_ib_dev *dev = context;
	u8 event_subtype = pfault->event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
		break;
	default:
		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
			    event_subtype);
		mlx5_ib_page_fault_resume(dev, pfault, 1);
	}
}
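
/*
 * Pre-size the two implicit-ODP buckets of the MR cache.  ent->xlt is
 * expressed in 16-byte octowords (MLX5_IB_UMR_OCTOWORD): the MTT bucket
 * needs one translation entry per page of a 1 GiB leaf, the KSM bucket
 * one entry per 1 GiB slot of the whole address space.
 */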
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
{
	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
		return;

	switch (ent->order - 2) {
	case MLX5_IMR_MTT_CACHE_ENTRY:
		ent->page = PAGE_SHIFT;
		ent->xlt = MLX5_IMR_MTT_ENTRIES *
			   sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		ent->limit = 0;
		break;

	case MLX5_IMR_KSM_CACHE_ENTRY:
		ent->page = MLX5_KSM_PAGE_SHIFT;
		ent->xlt = mlx5_imr_ksm_entries *
			   sizeof(struct mlx5_klm) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
		ent->limit = 0;
		break;
	}
}
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
	int ret = 0;

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
		if (ret) {
			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
			return ret;
		}
	}

	return 0;
}
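
/*
 * mlx5_imr_ksm_entries below is just TASK_SIZE / MLX5_IMR_MTT_SIZE:
 * get_order(TASK_SIZE) is log2 of the address space in pages, so
 * subtracting MLX5_IMR_MTT_BITS leaves one KSM entry per possible
 * 1 GiB leaf of an implicit MR.
 */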
int mlx5_ib_odp_init(void)
{
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
				       MLX5_IMR_MTT_BITS);

	return 0;
}