/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "mlx5_ib.h"

#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT

static u64 mlx5_imr_ksm_entries;

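/*
 * Helpers for walking the per-context umem interval tree. Every leaf MTT MR
 * of an implicit (parent) MR owns one ODP umem in that tree: check_parent()
 * filters nodes belonging to a given parent, while odp_next() and
 * odp_lookup() iterate and search the tree under umem_rwsem.
 */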
static int check_parent(struct ib_umem_odp *odp,
			struct mlx5_ib_mr *parent)
{
	struct mlx5_ib_mr *mr = odp->private;

	return mr && mr->parent == parent && !odp->dying;
}

static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
{
	struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
	struct ib_ucontext *ctx = odp->umem->context;
	struct rb_node *rb;

	down_read(&ctx->umem_rwsem);
	while (1) {
		rb = rb_next(&odp->interval_tree.rb);
		if (!rb)
			goto not_found;
		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
		if (check_parent(odp, parent))
			goto end;
	}
not_found:
	odp = NULL;
end:
	up_read(&ctx->umem_rwsem);
	return odp;
}

static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
				      u64 start, u64 length,
				      struct mlx5_ib_mr *parent)
{
	struct ib_umem_odp *odp;
	struct rb_node *rb;

	down_read(&ctx->umem_rwsem);
	odp = rbt_ib_umem_lookup(&ctx->umem_tree, start, length);
	if (!odp)
		goto end;

	while (1) {
		if (check_parent(odp, parent))
			goto end;
		rb = rb_next(&odp->interval_tree.rb);
		if (!rb)
			goto not_found;
		odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
		if (ib_umem_start(odp->umem) > start + length)
			goto not_found;
	}
not_found:
	odp = NULL;
end:
	up_read(&ctx->umem_rwsem);
	return odp;
}

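/*
 * Fill a span of KLM entries in the parent's indirect (KSM) mkey. Leaves
 * that are currently present point at the child MTT MR's lkey; absent leaves
 * and MLX5_IB_UPD_XLT_ZAP requests point at the device's null mkey, so a
 * later access generates a fresh page fault.
 */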
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
			   size_t nentries, struct mlx5_ib_mr *mr, int flags)
{
	struct ib_pd *pd = mr->ibmr.pd;
	struct ib_ucontext *ctx = pd->uobject->context;
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_odp *odp;
	unsigned long va;
	int i;

	if (flags & MLX5_IB_UPD_XLT_ZAP) {
		for (i = 0; i < nentries; i++, pklm++) {
			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = cpu_to_be32(dev->null_mkey);
			pklm->va = 0;
		}
		return;
	}

	odp = odp_lookup(ctx, offset * MLX5_IMR_MTT_SIZE,
			 nentries * MLX5_IMR_MTT_SIZE, mr);

	for (i = 0; i < nentries; i++, pklm++) {
		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
		va = (offset + i) * MLX5_IMR_MTT_SIZE;
		if (odp && odp->umem->address == va) {
			struct mlx5_ib_mr *mtt = odp->private;

			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
			odp = odp_next(odp);
		} else {
			pklm->key = cpu_to_be32(dev->null_mkey);
		}
		mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
			    i, va, be32_to_cpu(pklm->key));
	}
}

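/*
 * Deferred destruction of a leaf MTT MR, scheduled from the invalidation
 * path once the leaf has no mapped pages left. Waits out an SRCU grace
 * period so in-flight page-fault handlers are done with the MR, releases its
 * umem, points the parent's KSM entry for this range back at the null mkey
 * and returns the MR to the cache.
 */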
static void mr_leaf_free_action(struct work_struct *work)
{
	struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
	int idx = ib_umem_start(odp->umem) >> MLX5_IMR_MTT_SHIFT;
	struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;

	mr->parent = NULL;
	synchronize_srcu(&mr->dev->mr_srcu);

	ib_umem_release(odp->umem);
	if (imr->live)
		mlx5_ib_update_xlt(imr, idx, 1, 0,
				   MLX5_IB_UPD_XLT_INDIRECT |
				   MLX5_IB_UPD_XLT_ATOMIC);
	mlx5_mr_cache_free(mr->dev, mr);

	if (atomic_dec_and_test(&imr->num_leaf_free))
		wake_up(&imr->q_leaf_free);
}

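/*
 * MMU notifier callback: invalidate the device translation for [start, end)
 * of this umem. The MTTs are zapped first (in UMR-block-aligned chunks), the
 * pages are then unmapped, and an implicit-MR leaf that became empty is
 * scheduled for destruction.
 */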
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
			      unsigned long end)
{
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
				    sizeof(struct mlx5_mtt)) - 1;
	u64 idx = 0, blk_start_idx = 0;
	int in_block = 0;
	u64 addr;

	if (!umem || !umem->odp_data) {
		pr_err("invalidation called on NULL umem or non-ODP umem\n");
		return;
	}

	mr = umem->odp_data->private;

	if (!mr || !mr->ibmr.pd)
		return;

	start = max_t(u64, ib_umem_start(umem), start);
	end = min_t(u64, ib_umem_end(umem), end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs. Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */

	for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of a bigger
		 * UMR.
		 */
		if (umem->odp_data->dma_list[idx] &
		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
			if (!in_block) {
				blk_start_idx = idx;
				in_block = 1;
			}
		} else {
			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5_ib_update_xlt(mr, blk_start_idx,
						   idx - blk_start_idx, 0,
						   MLX5_IB_UPD_XLT_ZAP |
						   MLX5_IB_UPD_XLT_ATOMIC);
				in_block = 0;
			}
		}
	}
	if (in_block)
		mlx5_ib_update_xlt(mr, blk_start_idx,
				   idx - blk_start_idx + 1, 0,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ATOMIC);

	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */

	ib_umem_odp_unmap_dma_pages(umem, start, end);

	if (unlikely(!umem->npages && mr->parent &&
		     !umem->odp_data->dying)) {
		WRITE_ONCE(umem->odp_data->dying, 1);
		atomic_inc(&mr->parent->num_leaf_free);
		schedule_work(&umem->odp_data->work);
	}
}

void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg))
		return;

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		dev->odp_max_size = U64_MAX;
	else
		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;

	return;
}

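/*
 * Look up the MR that owns a faulting lkey. Returns NULL if the key does not
 * refer to a live MR, e.g. when the MR was destroyed while the fault event
 * was in flight.
 */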
static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
						   u32 key)
{
	u32 base_key = mlx5_base_mkey(key);
	struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
	struct mlx5_ib_mr *mr;

	if (!mmkey || mmkey->key != key || mmkey->type != MLX5_MKEY_MR)
		return NULL;

	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

	if (!mr->live)
		return NULL;

	return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;
	int ret = mlx5_core_page_fault_resume(dev->mdev,
					      pfault->token,
					      wq_num,
					      pfault->type,
					      error);
	if (ret)
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
			    wq_num);
}

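/*
 * Allocate either the parent (KSM-based, indirect) MR or a leaf (MTT-based)
 * MR from the MR cache and program its translation table as zapped, so that
 * every access initially faults.
 */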
static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
					    struct ib_umem *umem,
					    bool ksm, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err;

	mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
					    MLX5_IMR_MTT_CACHE_ENTRY);
	if (IS_ERR(mr))
		return mr;

	mr->ibmr.pd = pd;

	mr->dev = dev;
	mr->access_flags = access_flags;
	mr->mmkey.iova = 0;
	mr->umem = umem;

	if (ksm) {
		err = mlx5_ib_update_xlt(mr, 0,
					 mlx5_imr_ksm_entries,
					 MLX5_KSM_PAGE_SHIFT,
					 MLX5_IB_UPD_XLT_INDIRECT |
					 MLX5_IB_UPD_XLT_ZAP |
					 MLX5_IB_UPD_XLT_ENABLE);
	} else {
		err = mlx5_ib_update_xlt(mr, 0,
					 MLX5_IMR_MTT_ENTRIES,
					 PAGE_SHIFT,
					 MLX5_IB_UPD_XLT_ZAP |
					 MLX5_IB_UPD_XLT_ENABLE |
					 MLX5_IB_UPD_XLT_ATOMIC);
	}

	if (err)
		goto fail;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	mr->live = 1;

	mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
		    mr->mmkey.key, dev->mdev, mr);

	return mr;

fail:
	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
	mlx5_mr_cache_free(dev, mr);

	return ERR_PTR(err);
}

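/*
 * Make sure a leaf MTT MR exists for every MLX5_IMR_MTT_SIZE chunk covering
 * [io_virt, io_virt + bcnt). Missing leaves are allocated and hooked into
 * the parent's KSM table under the parent's umem_mutex. Returns the leaf
 * covering io_virt, or an ERR_PTR on failure.
 */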
static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
						u64 io_virt, size_t bcnt)
{
	struct ib_ucontext *ctx = mr->ibmr.pd->uobject->context;
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
	struct ib_umem_odp *odp, *result = NULL;
	u64 addr = io_virt & MLX5_IMR_MTT_MASK;
	int nentries = 0, start_idx = 0, ret;
	struct mlx5_ib_mr *mtt;
	struct ib_umem *umem;

	mutex_lock(&mr->umem->odp_data->umem_mutex);
	odp = odp_lookup(ctx, addr, 1, mr);

	mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
		    io_virt, bcnt, addr, odp);

next_mr:
	if (likely(odp)) {
		if (nentries)
			nentries++;
	} else {
		umem = ib_alloc_odp_umem(ctx, addr, MLX5_IMR_MTT_SIZE);
		if (IS_ERR(umem)) {
			mutex_unlock(&mr->umem->odp_data->umem_mutex);
			return ERR_CAST(umem);
		}

		mtt = implicit_mr_alloc(mr->ibmr.pd, umem, 0, mr->access_flags);
		if (IS_ERR(mtt)) {
			mutex_unlock(&mr->umem->odp_data->umem_mutex);
			ib_umem_release(umem);
			return ERR_CAST(mtt);
		}

		odp = umem->odp_data;
		odp->private = mtt;
		mtt->umem = umem;
		mtt->mmkey.iova = addr;
		mtt->parent = mr;
		INIT_WORK(&odp->work, mr_leaf_free_action);

		if (!nentries)
			start_idx = addr >> MLX5_IMR_MTT_SHIFT;
		nentries++;
	}

	/* Return first odp if region not covered by single one */
	if (likely(!result))
		result = odp;

	addr += MLX5_IMR_MTT_SIZE;
	if (unlikely(addr < io_virt + bcnt)) {
		odp = odp_next(odp);
		if (odp && odp->umem->address != addr)
			odp = NULL;
		goto next_mr;
	}

	if (unlikely(nentries)) {
		ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
					 MLX5_IB_UPD_XLT_INDIRECT |
					 MLX5_IB_UPD_XLT_ATOMIC);
		if (ret) {
			mlx5_ib_err(dev, "Failed to update PAS\n");
			result = ERR_PTR(ret);
		}
	}

	mutex_unlock(&mr->umem->odp_data->umem_mutex);

	return result;
}

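/*
 * Create an implicit ODP MR: a zero-length ODP umem plus a KSM-based parent
 * MR whose leaves are populated lazily by the page-fault handler.
 */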
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags)
{
	struct ib_ucontext *ctx = pd->ibpd.uobject->context;
	struct mlx5_ib_mr *imr;
	struct ib_umem *umem;

	umem = ib_umem_get(ctx, 0, 0, IB_ACCESS_ON_DEMAND, 0);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
	if (IS_ERR(imr)) {
		ib_umem_release(umem);
		return ERR_CAST(imr);
	}

	imr->umem = umem;
	init_waitqueue_head(&imr->q_leaf_free);
	atomic_set(&imr->num_leaf_free, 0);

	return imr;
}

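/*
 * Per-umem callback used by mlx5_ib_free_implicit_mr(): unmap all pages of a
 * leaf and schedule its MR for destruction. The caller then waits for every
 * leaf via num_leaf_free/q_leaf_free.
 */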
static int mr_leaf_free(struct ib_umem *umem, u64 start,
			u64 end, void *cookie)
{
	struct mlx5_ib_mr *mr = umem->odp_data->private, *imr = cookie;

	if (mr->parent != imr)
		return 0;

	ib_umem_odp_unmap_dma_pages(umem,
				    ib_umem_start(umem),
				    ib_umem_end(umem));

	if (umem->odp_data->dying)
		return 0;

	WRITE_ONCE(umem->odp_data->dying, 1);
	atomic_inc(&imr->num_leaf_free);
	schedule_work(&umem->odp_data->work);

	return 0;
}

void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
	struct ib_ucontext *ctx = imr->ibmr.pd->uobject->context;

	down_read(&ctx->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
				      mr_leaf_free, imr);
	up_read(&ctx->umem_rwsem);

	wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
}

/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
					 u32 key, u64 io_virt, size_t bcnt,
					 u32 *bytes_committed,
					 u32 *bytes_mapped)
{
	int srcu_key;
	unsigned int current_seq = 0;
	u64 start_idx;
	int npages = 0, ret = 0;
	struct mlx5_ib_mr *mr;
	u64 access_mask = ODP_READ_ALLOWED_BIT;
	struct ib_umem_odp *odp;
	int implicit = 0;
	size_t size;

	srcu_key = srcu_read_lock(&dev->mr_srcu);
	mr = mlx5_ib_odp_find_mr_lkey(dev, key);
	/*
	 * If we didn't find the MR, it means the MR was closed while we were
	 * handling the ODP event. In this case we return -EFAULT so that the
	 * QP will be closed.
	 */
	if (!mr || !mr->ibmr.pd) {
		mlx5_ib_dbg(dev, "Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
			    key);
		ret = -EFAULT;
		goto srcu_unlock;
	}
	if (!mr->umem->odp_data) {
		mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
			    key);
		if (bytes_mapped)
			*bytes_mapped +=
				(bcnt - *bytes_committed);
		goto srcu_unlock;
	}

	/*
	 * Avoid branches - this code will perform correctly
	 * in all iterations (in iteration 2 and above,
	 * bytes_committed == 0).
	 */
	io_virt += *bytes_committed;
	bcnt -= *bytes_committed;

	if (!mr->umem->odp_data->page_list) {
		odp = implicit_mr_get_data(mr, io_virt, bcnt);

		if (IS_ERR(odp)) {
			ret = PTR_ERR(odp);
			goto srcu_unlock;
		}
		mr = odp->private;
		implicit = 1;
	} else {
		odp = mr->umem->odp_data;
	}

next_mr:
	current_seq = READ_ONCE(odp->notifiers_seq);
	/*
	 * Ensure the sequence number is valid for some time before we call
	 * gup.
	 */
	smp_rmb();

	size = min_t(size_t, bcnt, ib_umem_end(odp->umem) - io_virt);
	start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;

	if (mr->umem->writable)
		access_mask |= ODP_WRITE_ALLOWED_BIT;

	ret = ib_umem_odp_map_dma_pages(mr->umem, io_virt, size,
					access_mask, current_seq);
	if (ret < 0)
		goto srcu_unlock;

	if (ret > 0) {
		int np = ret;

		mutex_lock(&odp->umem_mutex);
		if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
			/*
			 * No need to check whether the MTTs really belong to
			 * this MR, since ib_umem_odp_map_dma_pages already
			 * checks this.
			 */
			ret = mlx5_ib_update_xlt(mr, start_idx, np,
						 PAGE_SHIFT,
						 MLX5_IB_UPD_XLT_ATOMIC);
		} else {
			ret = -EAGAIN;
		}
		mutex_unlock(&odp->umem_mutex);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlx5_ib_err(dev, "Failed to update mkey page tables\n");
			goto srcu_unlock;
		}

		if (bytes_mapped) {
			u32 new_mappings = np * PAGE_SIZE -
				(io_virt - round_down(io_virt, PAGE_SIZE));
			*bytes_mapped += min_t(u32, new_mappings, size);
		}

		npages += np;
	}

	bcnt -= size;
	if (unlikely(bcnt)) {
		struct ib_umem_odp *next;

		io_virt += size;
		next = odp_next(odp);
		if (unlikely(!next || next->umem->address != io_virt)) {
			mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
				    io_virt, next);
			ret = -EAGAIN;
			goto srcu_unlock_no_wait;
		}
		odp = next;
		mr = odp->private;
		goto next_mr;
	}

srcu_unlock:
	if (ret == -EAGAIN) {
		if (implicit || !odp->dying) {
			unsigned long timeout =
				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

			if (!wait_for_completion_timeout(
					&odp->notifier_completion,
					timeout)) {
				mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d\n",
					     current_seq, odp->notifiers_seq);
			}
		} else {
			/* The MR is being killed, kill the QP as well. */
			ret = -EFAULT;
		}
	}

srcu_unlock_no_wait:
	srcu_read_unlock(&dev->mr_srcu, srcu_key);
	*bytes_committed = 0;
	return ret ? ret : npages;
}

/*
 * Parse a series of data segments for page fault handling.
 *
 * @qp the QP on which the fault occurred.
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               WQE).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
				   struct mlx5_pagefault *pfault,
				   struct mlx5_ib_qp *qp, void *wqe,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, int receive_queue)
{
	int ret = 0, npages = 0;
	u64 io_virt;
	u32 key;
	u32 byte_count;
	size_t bcnt;
	int inline_segment;

	/* Skip SRQ next-WQE segment. */
	if (receive_queue && qp->ibqp.srq)
		wqe += sizeof(struct mlx5_wqe_srq_next_seg);

	if (total_wqe_bytes)
		*total_wqe_bytes = 0;

	while (wqe < wqe_end) {
		struct mlx5_wqe_data_seg *dseg = wqe;

		io_virt = be64_to_cpu(dseg->addr);
		key = be32_to_cpu(dseg->lkey);
		byte_count = be32_to_cpu(dseg->byte_count);
		inline_segment = !!(byte_count & MLX5_INLINE_SEG);
		bcnt = byte_count & ~MLX5_INLINE_SEG;

		if (inline_segment) {
			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
				     16);
		} else {
			wqe += sizeof(*dseg);
		}

		/* receive WQE end of sg list. */
		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
		    io_virt == 0)
			break;

		if (!inline_segment && total_wqe_bytes) {
			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
					pfault->bytes_committed);
		}

		/* A zero length data segment designates a length of 2GB. */
		if (bcnt == 0)
			bcnt = 1U << 31;

		if (inline_segment || bcnt <= pfault->bytes_committed) {
			pfault->bytes_committed -=
				min_t(size_t, bcnt,
				      pfault->bytes_committed);
			continue;
		}

		ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
						    &pfault->bytes_committed,
						    bytes_mapped);
		if (ret < 0)
			break;
		npages += ret;
	}

	return ret < 0 ? ret : npages;
}

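/*
 * Map each send-queue opcode to the ODP capability bit that the transport
 * must advertise for that opcode to be allowed to fault.
 */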
static const u32 mlx5_ib_odp_opcode_cap[] = {
	[MLX5_OPCODE_SEND]           = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_IMM]       = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_SEND_INVAL]     = IB_ODP_SUPPORT_SEND,
	[MLX5_OPCODE_RDMA_WRITE]     = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE,
	[MLX5_OPCODE_RDMA_READ]      = IB_ODP_SUPPORT_READ,
	[MLX5_OPCODE_ATOMIC_CS]      = IB_ODP_SUPPORT_ATOMIC,
	[MLX5_OPCODE_ATOMIC_FA]      = IB_ODP_SUPPORT_ATOMIC,
};

/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
	u16 wqe_index = pfault->wqe.wqe_index;
	u32 transport_caps;
	struct mlx5_base_av *av;
	unsigned ds, opcode;
#if defined(DEBUG)
	u32 ctrl_wqe_index, ctrl_qpn;
#endif
	u32 qpn = qp->trans_qp.base.mqp.qpn;

	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
			    ds, wqe_length);
		return -EFAULT;
	}

	if (ds == 0) {
		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
			    wqe_index, qpn);
		return -EFAULT;
	}

#if defined(DEBUG)
	ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
			  MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
			 MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
	if (wqe_index != ctrl_wqe_index) {
		mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
			    wqe_index, qpn,
			    ctrl_wqe_index);
		return -EFAULT;
	}

	ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
		   MLX5_WQE_CTRL_QPN_SHIFT;
	if (qpn != ctrl_qpn) {
		mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
			    wqe_index, qpn,
			    ctrl_qpn);
		return -EFAULT;
	}
#endif /* DEBUG */

	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
	*wqe += sizeof(*ctrl);

	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
		 MLX5_WQE_CTRL_OPCODE_MASK;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
		break;
	case IB_QPT_UD:
		transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
		break;
	default:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
			    qp->ibqp.qp_type);
		return -EFAULT;
	}

	if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) /
		     sizeof(mlx5_ib_odp_opcode_cap[0]) ||
		     !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
			    opcode);
		return -EFAULT;
	}

	if (qp->ibqp.qp_type != IB_QPT_RC) {
		av = *wqe;
		if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
			*wqe += sizeof(struct mlx5_av);
		else
			*wqe += sizeof(struct mlx5_base_av);
	}

	switch (opcode) {
	case MLX5_OPCODE_RDMA_WRITE:
	case MLX5_OPCODE_RDMA_WRITE_IMM:
	case MLX5_OPCODE_RDMA_READ:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
	case MLX5_OPCODE_ATOMIC_FA:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		*wqe += sizeof(struct mlx5_wqe_atomic_seg);
		break;
	}

	return 0;
}

/*
 * Parse responder WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
	struct mlx5_ib_wq *wq = &qp->rq;
	int wqe_size = 1 << wq->wqe_shift;

	if (qp->ibqp.srq) {
		mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
		return -EFAULT;
	}

	if (qp->wq_sig) {
		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
		return -EFAULT;
	}

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
		return -EFAULT;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
		      IB_ODP_SUPPORT_RECV))
			goto invalid_transport_or_opcode;
		break;
	default:
invalid_transport_or_opcode:
		mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EFAULT;
	}

	*wqe_end = *wqe + wqe_size;

	return 0;
}

static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
					      u32 wq_num)
{
	struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);

	if (!mqp) {
		mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
		return NULL;
	}

	return to_mibqp(mqp);
}

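/*
 * Handle a WQE page fault: read the faulting WQE from user memory, parse it
 * as an initiator or responder WQE, fault in its data segments and finally
 * resume the QP, with error set if the fault could not be resolved.
 */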
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
{
	int ret;
	void *wqe, *wqe_end;
	u32 bytes_mapped, total_wqe_bytes;
	char *buffer = NULL;
	int resume_with_error = 1;
	u16 wqe_index = pfault->wqe.wqe_index;
	int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
	struct mlx5_ib_qp *qp;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer) {
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		goto resolve_page_fault;
	}

	qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
	if (!qp)
		goto resolve_page_fault;

	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
				    PAGE_SIZE, &qp->trans_qp.base);
	if (ret < 0) {
		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
			    ret, wqe_index, pfault->token);
		goto resolve_page_fault;
	}

	wqe = buffer;
	if (requestor)
		ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	else
		ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
							  &wqe_end, ret);
	if (ret < 0)
		goto resolve_page_fault;

	if (wqe >= wqe_end) {
		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
		goto resolve_page_fault;
	}

	ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
				      &bytes_mapped, &total_wqe_bytes,
				      !requestor);
	if (ret == -EAGAIN) {
		resume_with_error = 0;
		goto resolve_page_fault;
	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
		if (ret != -ENOENT)
			mlx5_ib_err(dev, "PAGE FAULT error: %d. QP 0x%x. type: 0x%x\n",
				    ret, pfault->wqe.wq_num, pfault->type);
		goto resolve_page_fault;
	}

	resume_with_error = 0;
resolve_page_fault:
	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
		    pfault->wqe.wq_num, resume_with_error,
		    pfault->type);
	free_page((unsigned long)buffer);
}

static int pages_in_range(u64 address, u32 length)
{
	return (ALIGN(address + length, PAGE_SIZE) -
		(address & PAGE_MASK)) >> PAGE_SHIFT;
}

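/*
 * Handle an RDMA responder page fault: fault in the pages needed by the
 * current packet, resume the QP, and optionally prefetch ahead (capped at
 * MAX_PREFETCH_LEN) when the hardware could not report the exact operation
 * length.
 */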
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
					   struct mlx5_pagefault *pfault)
{
	u64 address;
	u32 length;
	u32 prefetch_len = pfault->bytes_committed;
	int prefetch_activated = 0;
	u32 rkey = pfault->rdma.r_key;
	int ret;

	/* The RDMA responder handler handles the page fault in two parts.
	 * First it brings the necessary pages for the current packet
	 * (and uses the pfault context), and then (after resuming the QP)
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack. */
	pfault->rdma.rdma_va += pfault->bytes_committed;
	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
					pfault->rdma.rdma_op_len);
	pfault->bytes_committed = 0;

	address = pfault->rdma.rdma_va;
	length  = pfault->rdma.rdma_op_len;

	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic. */
	if (length == 0) {
		prefetch_activated = 1;
		length = pfault->rdma.packet_size;
		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
	}

	ret = pagefault_single_data_segment(dev, rkey, address, length,
					    &pfault->bytes_committed, NULL);
	if (ret == -EAGAIN) {
		/* We're racing with an invalidation, don't prefetch */
		prefetch_activated = 0;
	} else if (ret < 0 || pages_in_range(address, length) > ret) {
		mlx5_ib_page_fault_resume(dev, pfault, 1);
		if (ret != -ENOENT)
			mlx5_ib_warn(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
				     ret, pfault->token, pfault->type);
		return;
	}

	mlx5_ib_page_fault_resume(dev, pfault, 0);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
		    pfault->token, pfault->type,
		    prefetch_activated);

	/* At this point, there might be a new pagefault already arriving in
	 * the eq, switch to the dummy pagefault for the rest of the
	 * processing. We're still OK with the objects being alive as the
	 * work-queue is being fenced. */
	if (prefetch_activated) {
		u32 bytes_committed = 0;

		ret = pagefault_single_data_segment(dev, rkey, address,
						    prefetch_len,
						    &bytes_committed, NULL);
		if (ret < 0 && ret != -EAGAIN) {
			mlx5_ib_warn(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
				     ret, pfault->token, address, prefetch_len);
		}
	}
}

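/*
 * Entry point called by mlx5_core for every page-fault event: dispatch to
 * the WQE or RDMA handler and resume the QP with an error for unknown
 * subtypes.
 */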
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
		    struct mlx5_pagefault *pfault)
{
	struct mlx5_ib_dev *dev = context;
	u8 event_subtype = pfault->event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
		break;
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
		break;
	default:
		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
			    event_subtype);
		mlx5_ib_page_fault_resume(dev, pfault, 1);
	}
}

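/*
 * Configure the MR cache entries that back implicit ODP: one entry of
 * MTT-based leaf MRs and one entry of KSM-based parent MRs.
 */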
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
{
	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
		return;

	switch (ent->order - 2) {
	case MLX5_IMR_MTT_CACHE_ENTRY:
		ent->page = PAGE_SHIFT;
		ent->xlt = MLX5_IMR_MTT_ENTRIES *
			   sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		ent->limit = 0;
		break;

	case MLX5_IMR_KSM_CACHE_ENTRY:
		ent->page = MLX5_KSM_PAGE_SHIFT;
		ent->xlt = mlx5_imr_ksm_entries *
			   sizeof(struct mlx5_klm) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
		ent->limit = 0;
		break;
	}
}

int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
	int ret;

	ret = init_srcu_struct(&dev->mr_srcu);
	if (ret)
		return ret;

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
		if (ret) {
			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
			return ret;
		}
	}

	return 0;
}

void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *dev)
{
	cleanup_srcu_struct(&dev->mr_srcu);
}

int mlx5_ib_odp_init(void)
{
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -