drivers/infiniband/hw/mlx5/odp.c
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <rdma/ib_umem.h>
34 #include <rdma/ib_umem_odp.h>
35 #include <linux/kernel.h>
36
37 #include "mlx5_ib.h"
38 #include "cmd.h"
39
40 #include <linux/mlx5/eq.h>
41
42 /* Contains the details of a pagefault. */
43 struct mlx5_pagefault {
44         u32                     bytes_committed;
45         u32                     token;
46         u8                      event_subtype;
47         u8                      type;
48         union {
49                 /* Initiator or send message responder pagefault details. */
50                 struct {
51                         /* Received packet size, only valid for responders. */
52                         u32     packet_size;
53                         /*
54                          * Number of the resource holding the WQE; which resource depends on the fault type.
55                          */
56                         u32     wq_num;
57                         /*
58                          * WQE index. Refers to either the send queue or
59                          * receive queue, according to event_subtype.
60                          */
61                         u16     wqe_index;
62                 } wqe;
63                 /* RDMA responder pagefault details */
64                 struct {
65                         u32     r_key;
66                         /*
67                          * Received packet size; the minimum amount of page fault
68                          * resolution required for forward progress.
69                          */
70                         u32     packet_size;
71                         u32     rdma_op_len;
72                         u64     rdma_va;
73                 } rdma;
74         };
75
76         struct mlx5_ib_pf_eq    *eq;
77         struct work_struct      work;
78 };
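
/*
 * Illustrative note, derived from the EQ processing further down in this
 * file: for MLX5_PFAULT_SUBTYPE_WQE faults the 'wqe' member of the union
 * above is the valid one; for MLX5_PFAULT_SUBTYPE_RDMA faults it is 'rdma'.
 */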
79
80 #define MAX_PREFETCH_LEN (4*1024*1024U)
81
82 /* Timeout in ms to wait for an active mmu notifier to complete when handling
83  * a pagefault. */
84 #define MMU_NOTIFIER_TIMEOUT 1000
85
86 #define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
87 #define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
88 #define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
89 #define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
90 #define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
91
92 #define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
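
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * with 4 KiB pages (PAGE_SHIFT == 12) the macros above describe implicit-MR
 * leaves of 1 GiB, each backed by 256K page-sized MTT entries. This shows
 * how an I/O virtual address maps onto that geometry.
 */
static inline void mlx5_imr_leaf_example(u64 io_virt)
{
        u64 leaf_start = io_virt & MLX5_IMR_MTT_MASK;   /* leaf-aligned base */
        u64 leaf_idx = io_virt >> MLX5_IMR_MTT_SHIFT;   /* index into the KSM */

        pr_debug("va 0x%llx -> leaf %llu starting at 0x%llx\n",
                 io_virt, leaf_idx, leaf_start);
}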
93
94 static u64 mlx5_imr_ksm_entries;
95
96 static int check_parent(struct ib_umem_odp *odp,
97                                struct mlx5_ib_mr *parent)
98 {
99         struct mlx5_ib_mr *mr = odp->private;
100
101         return mr && mr->parent == parent && !odp->dying;
102 }
103
104 static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
105 {
106         if (WARN_ON(!mr || !is_odp_mr(mr)))
107                 return NULL;
108
109         return to_ib_umem_odp(mr->umem)->per_mm;
110 }
111
112 static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
113 {
114         struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
115         struct ib_ucontext_per_mm *per_mm = odp->per_mm;
116         struct rb_node *rb;
117
118         down_read(&per_mm->umem_rwsem);
119         while (1) {
120                 rb = rb_next(&odp->interval_tree.rb);
121                 if (!rb)
122                         goto not_found;
123                 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
124                 if (check_parent(odp, parent))
125                         goto end;
126         }
127 not_found:
128         odp = NULL;
129 end:
130         up_read(&per_mm->umem_rwsem);
131         return odp;
132 }
133
134 static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
135                                       struct mlx5_ib_mr *parent)
136 {
137         struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
138         struct ib_umem_odp *odp;
139         struct rb_node *rb;
140
141         down_read(&per_mm->umem_rwsem);
142         odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
143         if (!odp)
144                 goto end;
145
146         while (1) {
147                 if (check_parent(odp, parent))
148                         goto end;
149                 rb = rb_next(&odp->interval_tree.rb);
150                 if (!rb)
151                         goto not_found;
152                 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
153                 if (ib_umem_start(odp) > start + length)
154                         goto not_found;
155         }
156 not_found:
157         odp = NULL;
158 end:
159         up_read(&per_mm->umem_rwsem);
160         return odp;
161 }
162
163 void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
164                            size_t nentries, struct mlx5_ib_mr *mr, int flags)
165 {
166         struct ib_pd *pd = mr->ibmr.pd;
167         struct mlx5_ib_dev *dev = to_mdev(pd->device);
168         struct ib_umem_odp *odp;
169         unsigned long va;
170         int i;
171
172         if (flags & MLX5_IB_UPD_XLT_ZAP) {
173                 for (i = 0; i < nentries; i++, pklm++) {
174                         pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
175                         pklm->key = cpu_to_be32(dev->null_mkey);
176                         pklm->va = 0;
177                 }
178                 return;
179         }
180
181         odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
182                          nentries * MLX5_IMR_MTT_SIZE, mr);
183
184         for (i = 0; i < nentries; i++, pklm++) {
185                 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
186                 va = (offset + i) * MLX5_IMR_MTT_SIZE;
187                 if (odp && ib_umem_start(odp) == va) {
188                         struct mlx5_ib_mr *mtt = odp->private;
189
190                         pklm->key = cpu_to_be32(mtt->ibmr.lkey);
191                         odp = odp_next(odp);
192                 } else {
193                         pklm->key = cpu_to_be32(dev->null_mkey);
194                 }
195                 mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
196                             i, va, be32_to_cpu(pklm->key));
197         }
198 }
199
200 static void mr_leaf_free_action(struct work_struct *work)
201 {
202         struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
203         int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
204         struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
205
206         mr->parent = NULL;
207         synchronize_srcu(&mr->dev->mr_srcu);
208
209         ib_umem_odp_release(odp);
210         if (imr->live)
211                 mlx5_ib_update_xlt(imr, idx, 1, 0,
212                                    MLX5_IB_UPD_XLT_INDIRECT |
213                                    MLX5_IB_UPD_XLT_ATOMIC);
214         mlx5_mr_cache_free(mr->dev, mr);
215
216         if (atomic_dec_and_test(&imr->num_leaf_free))
217                 wake_up(&imr->q_leaf_free);
218 }
219
220 void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
221                               unsigned long end)
222 {
223         struct mlx5_ib_mr *mr;
224         const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
225                                     sizeof(struct mlx5_mtt)) - 1;
226         u64 idx = 0, blk_start_idx = 0;
227         int in_block = 0;
228         u64 addr;
229
230         if (!umem_odp) {
231                 pr_err("invalidation called on NULL umem or non-ODP umem\n");
232                 return;
233         }
234
235         mr = umem_odp->private;
236
237         if (!mr || !mr->ibmr.pd)
238                 return;
239
240         start = max_t(u64, ib_umem_start(umem_odp), start);
241         end = min_t(u64, ib_umem_end(umem_odp), end);
242
243         /*
244          * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
245          * while we are doing the invalidation, no page fault will attempt to
246          * overwrite the same MTTs.  Concurrent invalidations might race us,
247          * but they will write 0s as well, so no difference in the end result.
248          */
249         mutex_lock(&umem_odp->umem_mutex);
250         for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
251                 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
252                 /*
253                  * Strive to write the MTTs in chunks, but avoid overwriting
254                  * non-existing MTTs. The heuristic here can be improved to
255                  * estimate the cost of another UMR vs. the cost of a bigger
256                  * UMR.
257                  */
258                 if (umem_odp->dma_list[idx] &
259                     (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
260                         if (!in_block) {
261                                 blk_start_idx = idx;
262                                 in_block = 1;
263                         }
264                 } else {
265                         u64 umr_offset = idx & umr_block_mask;
266
267                         if (in_block && umr_offset == 0) {
268                                 mlx5_ib_update_xlt(mr, blk_start_idx,
269                                                    idx - blk_start_idx, 0,
270                                                    MLX5_IB_UPD_XLT_ZAP |
271                                                    MLX5_IB_UPD_XLT_ATOMIC);
272                                 in_block = 0;
273                         }
274                 }
275         }
276         if (in_block)
277                 mlx5_ib_update_xlt(mr, blk_start_idx,
278                                    idx - blk_start_idx + 1, 0,
279                                    MLX5_IB_UPD_XLT_ZAP |
280                                    MLX5_IB_UPD_XLT_ATOMIC);
281         mutex_unlock(&umem_odp->umem_mutex);
282         /*
283          * We are now sure that the device will not access the
284          * memory. We can safely unmap it, and mark it as dirty if
285          * needed.
286          */
287
288         ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
289
290         if (unlikely(!umem_odp->npages && mr->parent &&
291                      !umem_odp->dying)) {
292                 WRITE_ONCE(umem_odp->dying, 1);
293                 atomic_inc(&mr->parent->num_leaf_free);
294                 schedule_work(&umem_odp->work);
295         }
296 }
297
298 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
299 {
300         struct ib_odp_caps *caps = &dev->odp_caps;
301
302         memset(caps, 0, sizeof(*caps));
303
304         if (!MLX5_CAP_GEN(dev->mdev, pg) ||
305             !mlx5_ib_can_use_umr(dev, true))
306                 return;
307
308         caps->general_caps = IB_ODP_SUPPORT;
309
310         if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
311                 dev->odp_max_size = U64_MAX;
312         else
313                 dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
314
315         if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
316                 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
317
318         if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
319                 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
320
321         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
322                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
323
324         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
325                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
326
327         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
328                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
329
330         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
331                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
332
333         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
334                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
335
336         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
337                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
338
339         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
340                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
341
342         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
343                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
344
345         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
346                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
347
348         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
349                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
350
351         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
352                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
353
354         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
355                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
356
357         if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
358             MLX5_CAP_GEN(dev->mdev, null_mkey) &&
359             MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
360             !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
361                 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
362
363         return;
364 }
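
/*
 * Minimal consumer-side sketch (hypothetical helper, not part of the
 * driver): how the capability bits filled in above could be tested before
 * relying on ODP-backed sends on a UD QP.
 */
static inline bool mlx5_ib_example_ud_odp_send(struct mlx5_ib_dev *dev)
{
        struct ib_odp_caps *caps = &dev->odp_caps;

        return (caps->general_caps & IB_ODP_SUPPORT) &&
               (caps->per_transport_caps.ud_odp_caps & IB_ODP_SUPPORT_SEND);
}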
365
366 static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
367                                       struct mlx5_pagefault *pfault,
368                                       int error)
369 {
370         int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
371                      pfault->wqe.wq_num : pfault->token;
372         u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
373         u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = { };
374         int err;
375
376         MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
377         MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
378         MLX5_SET(page_fault_resume_in, in, token, pfault->token);
379         MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
380         MLX5_SET(page_fault_resume_in, in, error, !!error);
381
382         err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
383         if (err)
384                 mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
385                             wq_num, err);
386 }
387
388 static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
389                                             struct ib_umem_odp *umem_odp,
390                                             bool ksm, int access_flags)
391 {
392         struct mlx5_ib_dev *dev = to_mdev(pd->device);
393         struct mlx5_ib_mr *mr;
394         int err;
395
396         mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
397                                             MLX5_IMR_MTT_CACHE_ENTRY);
398
399         if (IS_ERR(mr))
400                 return mr;
401
402         mr->ibmr.pd = pd;
403
404         mr->dev = dev;
405         mr->access_flags = access_flags;
406         mr->mmkey.iova = 0;
407         mr->umem = &umem_odp->umem;
408
409         if (ksm) {
410                 err = mlx5_ib_update_xlt(mr, 0,
411                                          mlx5_imr_ksm_entries,
412                                          MLX5_KSM_PAGE_SHIFT,
413                                          MLX5_IB_UPD_XLT_INDIRECT |
414                                          MLX5_IB_UPD_XLT_ZAP |
415                                          MLX5_IB_UPD_XLT_ENABLE);
416
417         } else {
418                 err = mlx5_ib_update_xlt(mr, 0,
419                                          MLX5_IMR_MTT_ENTRIES,
420                                          PAGE_SHIFT,
421                                          MLX5_IB_UPD_XLT_ZAP |
422                                          MLX5_IB_UPD_XLT_ENABLE |
423                                          MLX5_IB_UPD_XLT_ATOMIC);
424         }
425
426         if (err)
427                 goto fail;
428
429         mr->ibmr.lkey = mr->mmkey.key;
430         mr->ibmr.rkey = mr->mmkey.key;
431
432         mr->live = 1;
433
434         mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
435                     mr->mmkey.key, dev->mdev, mr);
436
437         return mr;
438
439 fail:
440         mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
441         mlx5_mr_cache_free(dev, mr);
442
443         return ERR_PTR(err);
444 }
445
446 static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
447                                                 u64 io_virt, size_t bcnt)
448 {
449         struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
450         struct ib_umem_odp *odp, *result = NULL;
451         struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
452         u64 addr = io_virt & MLX5_IMR_MTT_MASK;
453         int nentries = 0, start_idx = 0, ret;
454         struct mlx5_ib_mr *mtt;
455
456         mutex_lock(&odp_mr->umem_mutex);
457         odp = odp_lookup(addr, 1, mr);
458
459         mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
460                     io_virt, bcnt, addr, odp);
461
462 next_mr:
463         if (likely(odp)) {
464                 if (nentries)
465                         nentries++;
466         } else {
467                 odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE);
468                 if (IS_ERR(odp)) {
469                         mutex_unlock(&odp_mr->umem_mutex);
470                         return ERR_CAST(odp);
471                 }
472
473                 mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0,
474                                         mr->access_flags);
475                 if (IS_ERR(mtt)) {
476                         mutex_unlock(&odp_mr->umem_mutex);
477                         ib_umem_odp_release(odp);
478                         return ERR_CAST(mtt);
479                 }
480
481                 odp->private = mtt;
482                 mtt->umem = &odp->umem;
483                 mtt->mmkey.iova = addr;
484                 mtt->parent = mr;
485                 INIT_WORK(&odp->work, mr_leaf_free_action);
486
487                 if (!nentries)
488                         start_idx = addr >> MLX5_IMR_MTT_SHIFT;
489                 nentries++;
490         }
491
492         /* Return the first odp if the region is not covered by a single one */
493         if (likely(!result))
494                 result = odp;
495
496         addr += MLX5_IMR_MTT_SIZE;
497         if (unlikely(addr < io_virt + bcnt)) {
498                 odp = odp_next(odp);
499                 if (odp && ib_umem_start(odp) != addr)
500                         odp = NULL;
501                 goto next_mr;
502         }
503
504         if (unlikely(nentries)) {
505                 ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
506                                          MLX5_IB_UPD_XLT_INDIRECT |
507                                          MLX5_IB_UPD_XLT_ATOMIC);
508                 if (ret) {
509                         mlx5_ib_err(dev, "Failed to update PAS\n");
510                         result = ERR_PTR(ret);
511                 }
512         }
513
514         mutex_unlock(&odp_mr->umem_mutex);
515         return result;
516 }
517
518 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
519                                              struct ib_udata *udata,
520                                              int access_flags)
521 {
522         struct mlx5_ib_mr *imr;
523         struct ib_umem_odp *umem_odp;
524
525         umem_odp = ib_umem_odp_alloc_implicit(udata, access_flags);
526         if (IS_ERR(umem_odp))
527                 return ERR_CAST(umem_odp);
528
529         imr = implicit_mr_alloc(&pd->ibpd, umem_odp, 1, access_flags);
530         if (IS_ERR(imr)) {
531                 ib_umem_odp_release(umem_odp);
532                 return ERR_CAST(imr);
533         }
534
535         imr->umem = &umem_odp->umem;
536         init_waitqueue_head(&imr->q_leaf_free);
537         atomic_set(&imr->num_leaf_free, 0);
538         atomic_set(&imr->num_pending_prefetch, 0);
539
540         return imr;
541 }
542
543 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
544 {
545         struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
546         struct rb_node *node;
547
548         down_read(&per_mm->umem_rwsem);
549         for (node = rb_first_cached(&per_mm->umem_tree); node;
550              node = rb_next(node)) {
551                 struct ib_umem_odp *umem_odp =
552                         rb_entry(node, struct ib_umem_odp, interval_tree.rb);
553                 struct mlx5_ib_mr *mr = umem_odp->private;
554
555                 if (mr->parent != imr)
556                         continue;
557
558                 ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
559                                             ib_umem_end(umem_odp));
560
561                 if (umem_odp->dying)
562                         continue;
563
564                 WRITE_ONCE(umem_odp->dying, 1);
565                 atomic_inc(&imr->num_leaf_free);
566                 schedule_work(&umem_odp->work);
567         }
568         up_read(&per_mm->umem_rwsem);
569
570         wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
571 }
572
573 #define MLX5_PF_FLAGS_PREFETCH  BIT(0)
574 #define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
575 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
576                         u64 io_virt, size_t bcnt, u32 *bytes_mapped,
577                         u32 flags)
578 {
579         int npages = 0, current_seq, page_shift, ret, np;
580         struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
581         bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
582         bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
583         u64 access_mask;
584         u64 start_idx, page_mask;
585         struct ib_umem_odp *odp;
586         size_t size;
587
588         if (odp_mr->is_implicit_odp) {
589                 odp = implicit_mr_get_data(mr, io_virt, bcnt);
590
591                 if (IS_ERR(odp))
592                         return PTR_ERR(odp);
593                 mr = odp->private;
594         } else {
595                 odp = odp_mr;
596         }
597
598 next_mr:
599         size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
600
601         page_shift = odp->page_shift;
602         page_mask = ~(BIT(page_shift) - 1);
603         start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
604         access_mask = ODP_READ_ALLOWED_BIT;
605
606         if (prefetch && !downgrade && !odp->umem.writable) {
607                 /* A prefetch that requests write access
608                  * requires a writable MR.
609                  */
610                 ret = -EINVAL;
611                 goto out;
612         }
613
614         if (odp->umem.writable && !downgrade)
615                 access_mask |= ODP_WRITE_ALLOWED_BIT;
616
617         current_seq = READ_ONCE(odp->notifiers_seq);
618         /*
619          * Ensure the sequence number is valid for some time before we call
620          * gup.
621          */
622         smp_rmb();
623
624         ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
625                                         current_seq);
626
627         if (ret < 0)
628                 goto out;
629
630         np = ret;
631
632         mutex_lock(&odp->umem_mutex);
633         if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
634                 /*
635                  * No need to check whether the MTTs really belong to
636                  * this MR, since ib_umem_odp_map_dma_pages already
637                  * checks this.
638                  */
639                 ret = mlx5_ib_update_xlt(mr, start_idx, np,
640                                          page_shift, MLX5_IB_UPD_XLT_ATOMIC);
641         } else {
642                 ret = -EAGAIN;
643         }
644         mutex_unlock(&odp->umem_mutex);
645
646         if (ret < 0) {
647                 if (ret != -EAGAIN)
648                         mlx5_ib_err(dev, "Failed to update mkey page tables\n");
649                 goto out;
650         }
651
652         if (bytes_mapped) {
653                 u32 new_mappings = (np << page_shift) -
654                         (io_virt - round_down(io_virt, 1 << page_shift));
655                 *bytes_mapped += min_t(u32, new_mappings, size);
656         }
657
658         npages += np << (page_shift - PAGE_SHIFT);
659         bcnt -= size;
660
661         if (unlikely(bcnt)) {
662                 struct ib_umem_odp *next;
663
664                 io_virt += size;
665                 next = odp_next(odp);
666                 if (unlikely(!next || ib_umem_start(next) != io_virt)) {
667                         mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
668                                     io_virt, next);
669                         return -EAGAIN;
670                 }
671                 odp = next;
672                 mr = odp->private;
673                 goto next_mr;
674         }
675
676         return npages;
677
678 out:
679         if (ret == -EAGAIN) {
680                 unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
681
682                 if (!wait_for_completion_timeout(&odp->notifier_completion,
683                                                  timeout)) {
684                         mlx5_ib_warn(
685                                 dev,
686                                 "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
687                                 current_seq, odp->notifiers_seq,
688                                 odp->notifiers_count);
689                 }
690         }
691
692         return ret;
693 }
694
695 struct pf_frame {
696         struct pf_frame *next;
697         u32 key;
698         u64 io_virt;
699         size_t bcnt;
700         int depth;
701 };
702
703 static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
704 {
705         if (!mmkey)
706                 return false;
707         if (mmkey->type == MLX5_MKEY_MW)
708                 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
709         return mmkey->key == key;
710 }
711
712 static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
713 {
714         struct mlx5_ib_mw *mw;
715         struct mlx5_ib_devx_mr *devx_mr;
716
717         if (mmkey->type == MLX5_MKEY_MW) {
718                 mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
719                 return mw->ndescs;
720         }
721
722         devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
723                                mmkey);
724         return devx_mr->ndescs;
725 }
726
727 /*
728  * Handle a single data segment in a page-fault WQE or RDMA region.
729  *
730  * Returns number of OS pages retrieved on success. The caller may continue to
731  * the next data segment.
732  * Can return the following error codes:
733  * -EAGAIN to designate a temporary error. The caller will abort handling the
734  *  page fault and resolve it.
735  * -EFAULT when there's an error mapping the requested pages. The caller will
736  *  abort the page fault handling.
737  */
738 static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
739                                          struct ib_pd *pd, u32 key,
740                                          u64 io_virt, size_t bcnt,
741                                          u32 *bytes_committed,
742                                          u32 *bytes_mapped, u32 flags)
743 {
744         int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
745         bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
746         struct pf_frame *head = NULL, *frame;
747         struct mlx5_core_mkey *mmkey;
748         struct mlx5_ib_mr *mr;
749         struct mlx5_klm *pklm;
750         u32 *out = NULL;
751         size_t offset;
752         int ndescs;
753
754         srcu_key = srcu_read_lock(&dev->mr_srcu);
755
756         io_virt += *bytes_committed;
757         bcnt -= *bytes_committed;
758
759 next_mr:
760         mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
761         if (!mkey_is_eq(mmkey, key)) {
762                 mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
763                 ret = -EFAULT;
764                 goto srcu_unlock;
765         }
766
767         if (prefetch && mmkey->type != MLX5_MKEY_MR) {
768                 mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
769                 ret = -EINVAL;
770                 goto srcu_unlock;
771         }
772
773         switch (mmkey->type) {
774         case MLX5_MKEY_MR:
775                 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
776                 if (!mr->live || !mr->ibmr.pd) {
777                         mlx5_ib_dbg(dev, "got dead MR\n");
778                         ret = -EFAULT;
779                         goto srcu_unlock;
780                 }
781
782                 if (prefetch) {
783                         if (!is_odp_mr(mr) ||
784                             mr->ibmr.pd != pd) {
785                                 mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
786                                             is_odp_mr(mr) ?  "MR is not ODP" :
787                                             "PD is not of the MR");
788                                 ret = -EINVAL;
789                                 goto srcu_unlock;
790                         }
791                 }
792
793                 if (!is_odp_mr(mr)) {
794                         mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
795                                     key);
796                         if (bytes_mapped)
797                                 *bytes_mapped += bcnt;
798                         ret = 0;
799                         goto srcu_unlock;
800                 }
801
802                 ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
803                 if (ret < 0)
804                         goto srcu_unlock;
805
806                 npages += ret;
807                 ret = 0;
808                 break;
809
810         case MLX5_MKEY_MW:
811         case MLX5_MKEY_INDIRECT_DEVX:
812                 ndescs = get_indirect_num_descs(mmkey);
813
814                 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
815                         mlx5_ib_dbg(dev, "indirection level exceeded\n");
816                         ret = -EFAULT;
817                         goto srcu_unlock;
818                 }
819
820                 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
821                         sizeof(*pklm) * (ndescs - 2);
822
823                 if (outlen > cur_outlen) {
824                         kfree(out);
825                         out = kzalloc(outlen, GFP_KERNEL);
826                         if (!out) {
827                                 ret = -ENOMEM;
828                                 goto srcu_unlock;
829                         }
830                         cur_outlen = outlen;
831                 }
832
833                 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
834                                                        bsf0_klm0_pas_mtt0_1);
835
836                 ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
837                 if (ret)
838                         goto srcu_unlock;
839
840                 offset = io_virt - MLX5_GET64(query_mkey_out, out,
841                                               memory_key_mkey_entry.start_addr);
842
843                 for (i = 0; bcnt && i < ndescs; i++, pklm++) {
844                         if (offset >= be32_to_cpu(pklm->bcount)) {
845                                 offset -= be32_to_cpu(pklm->bcount);
846                                 continue;
847                         }
848
849                         frame = kzalloc(sizeof(*frame), GFP_KERNEL);
850                         if (!frame) {
851                                 ret = -ENOMEM;
852                                 goto srcu_unlock;
853                         }
854
855                         frame->key = be32_to_cpu(pklm->key);
856                         frame->io_virt = be64_to_cpu(pklm->va) + offset;
857                         frame->bcnt = min_t(size_t, bcnt,
858                                             be32_to_cpu(pklm->bcount) - offset);
859                         frame->depth = depth + 1;
860                         frame->next = head;
861                         head = frame;
862
863                         bcnt -= frame->bcnt;
864                         offset = 0;
865                 }
866                 break;
867
868         default:
869                 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
870                 ret = -EFAULT;
871                 goto srcu_unlock;
872         }
873
874         if (head) {
875                 frame = head;
876                 head = frame->next;
877
878                 key = frame->key;
879                 io_virt = frame->io_virt;
880                 bcnt = frame->bcnt;
881                 depth = frame->depth;
882                 kfree(frame);
883
884                 goto next_mr;
885         }
886
887 srcu_unlock:
888         while (head) {
889                 frame = head;
890                 head = frame->next;
891                 kfree(frame);
892         }
893         kfree(out);
894
895         srcu_read_unlock(&dev->mr_srcu, srcu_key);
896         *bytes_committed = 0;
897         return ret ? ret : npages;
898 }
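
/*
 * Illustrative sketch of the return convention documented above
 * (hypothetical helper, not used by the driver): -EAGAIN means we raced an
 * invalidation and the fault is resumed without the error bit so the
 * hardware can simply refault later; any other negative value resolves the
 * fault with an error; a non-negative value is the number of pages mapped.
 */
static inline void mlx5_ib_example_handle_pf_ret(struct mlx5_ib_dev *dev,
                                                 struct mlx5_pagefault *pfault,
                                                 int ret)
{
        mlx5_ib_page_fault_resume(dev, pfault, ret < 0 && ret != -EAGAIN);
}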
899
900 /**
901  * pagefault_data_segments() - Parse data segments for page fault handling.
902  *
903  * @pfault: contains page fault information.
904  * @wqe: points at the first data segment in the WQE.
905  * @wqe_end: points after the end of the WQE.
906  * @bytes_mapped: receives the number of bytes that the function was able to
907  *               map. This allows the caller to decide intelligently whether
908  *               enough memory was mapped to resolve the page fault
909  *               successfully (e.g. enough for the next MTU, or the entire
910  *               WQE).
911  * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
912  *                  the committed bytes).
913  *
914  * Returns the number of pages loaded if positive, zero for an empty WQE, or a
915  * negative error code.
916  */
917 static int pagefault_data_segments(struct mlx5_ib_dev *dev,
918                                    struct mlx5_pagefault *pfault,
919                                    void *wqe,
920                                    void *wqe_end, u32 *bytes_mapped,
921                                    u32 *total_wqe_bytes, bool receive_queue)
922 {
923         int ret = 0, npages = 0;
924         u64 io_virt;
925         u32 key;
926         u32 byte_count;
927         size_t bcnt;
928         int inline_segment;
929
930         if (bytes_mapped)
931                 *bytes_mapped = 0;
932         if (total_wqe_bytes)
933                 *total_wqe_bytes = 0;
934
935         while (wqe < wqe_end) {
936                 struct mlx5_wqe_data_seg *dseg = wqe;
937
938                 io_virt = be64_to_cpu(dseg->addr);
939                 key = be32_to_cpu(dseg->lkey);
940                 byte_count = be32_to_cpu(dseg->byte_count);
941                 inline_segment = !!(byte_count &  MLX5_INLINE_SEG);
942                 bcnt           = byte_count & ~MLX5_INLINE_SEG;
943
944                 if (inline_segment) {
945                         bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
946                         wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
947                                      16);
948                 } else {
949                         wqe += sizeof(*dseg);
950                 }
951
952                 /* End-of-SG-list marker in a receive WQE. */
953                 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
954                     io_virt == 0)
955                         break;
956
957                 if (!inline_segment && total_wqe_bytes) {
958                         *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
959                                         pfault->bytes_committed);
960                 }
961
962                 /* A zero length data segment designates a length of 2GB. */
963                 if (bcnt == 0)
964                         bcnt = 1U << 31;
965
966                 if (inline_segment || bcnt <= pfault->bytes_committed) {
967                         pfault->bytes_committed -=
968                                 min_t(size_t, bcnt,
969                                       pfault->bytes_committed);
970                         continue;
971                 }
972
973                 ret = pagefault_single_data_segment(dev, NULL, key,
974                                                     io_virt, bcnt,
975                                                     &pfault->bytes_committed,
976                                                     bytes_mapped, 0);
977                 if (ret < 0)
978                         break;
979                 npages += ret;
980         }
981
982         return ret < 0 ? ret : npages;
983 }
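
/*
 * Minimal sketch of the data-segment decoding done in the loop above
 * (hypothetical helper, illustration only): the MLX5_INLINE_SEG flag is
 * carried in the byte_count word and must be masked off; inline segments
 * carry their payload in the WQE itself and never generate a page fault.
 */
static inline void mlx5_ib_example_decode_dseg(struct mlx5_wqe_data_seg *dseg)
{
        u32 byte_count = be32_to_cpu(dseg->byte_count);
        bool inline_seg = byte_count & MLX5_INLINE_SEG;
        size_t bcnt = byte_count & ~MLX5_INLINE_SEG;

        if (inline_seg)
                bcnt &= MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;

        pr_debug("dseg: lkey 0x%x addr 0x%llx len %zu inline %d\n",
                 be32_to_cpu(dseg->lkey), be64_to_cpu(dseg->addr), bcnt,
                 inline_seg);
}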
984
985 /*
986  * Parse an initiator WQE. Advances the wqe pointer to point at the
987  * scatter-gather list, and sets wqe_end to the end of the WQE.
988  */
989 static int mlx5_ib_mr_initiator_pfault_handler(
990         struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
991         struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
992 {
993         struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
994         u16 wqe_index = pfault->wqe.wqe_index;
995         struct mlx5_base_av *av;
996         unsigned ds, opcode;
997         u32 qpn = qp->trans_qp.base.mqp.qpn;
998
999         ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
1000         if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
1001                 mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, wqe_length = 0x%x\n",
1002                             ds, wqe_length);
1003                 return -EFAULT;
1004         }
1005
1006         if (ds == 0) {
1007                 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
1008                             wqe_index, qpn);
1009                 return -EFAULT;
1010         }
1011
1012         *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
1013         *wqe += sizeof(*ctrl);
1014
1015         opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
1016                  MLX5_WQE_CTRL_OPCODE_MASK;
1017
1018         if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
1019                 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
1020
1021         if (qp->ibqp.qp_type == IB_QPT_UD ||
1022             qp->qp_sub_type == MLX5_IB_QPT_DCI) {
1023                 av = *wqe;
1024                 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
1025                         *wqe += sizeof(struct mlx5_av);
1026                 else
1027                         *wqe += sizeof(struct mlx5_base_av);
1028         }
1029
1030         switch (opcode) {
1031         case MLX5_OPCODE_RDMA_WRITE:
1032         case MLX5_OPCODE_RDMA_WRITE_IMM:
1033         case MLX5_OPCODE_RDMA_READ:
1034                 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1035                 break;
1036         case MLX5_OPCODE_ATOMIC_CS:
1037         case MLX5_OPCODE_ATOMIC_FA:
1038                 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1039                 *wqe += sizeof(struct mlx5_wqe_atomic_seg);
1040                 break;
1041         }
1042
1043         return 0;
1044 }
1045
1046 /*
1047  * Parse responder WQE and set wqe_end to the end of the WQE.
1048  */
1049 static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
1050                                                    struct mlx5_ib_srq *srq,
1051                                                    void **wqe, void **wqe_end,
1052                                                    int wqe_length)
1053 {
1054         int wqe_size = 1 << srq->msrq.wqe_shift;
1055
1056         if (wqe_size > wqe_length) {
1057                 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1058                 return -EFAULT;
1059         }
1060
1061         *wqe_end = *wqe + wqe_size;
1062         *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
1063
1064         return 0;
1065 }
1066
1067 static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
1068                                                   struct mlx5_ib_qp *qp,
1069                                                   void *wqe, void **wqe_end,
1070                                                   int wqe_length)
1071 {
1072         struct mlx5_ib_wq *wq = &qp->rq;
1073         int wqe_size = 1 << wq->wqe_shift;
1074
1075         if (qp->wq_sig) {
1076                 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
1077                 return -EFAULT;
1078         }
1079
1080         if (wqe_size > wqe_length) {
1081                 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1082                 return -EFAULT;
1083         }
1084
1085         *wqe_end = wqe + wqe_size;
1086
1087         return 0;
1088 }
1089
1090 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
1091                                                        u32 wq_num, int pf_type)
1092 {
1093         struct mlx5_core_rsc_common *common = NULL;
1094         struct mlx5_core_srq *srq;
1095
1096         switch (pf_type) {
1097         case MLX5_WQE_PF_TYPE_RMP:
1098                 srq = mlx5_cmd_get_srq(dev, wq_num);
1099                 if (srq)
1100                         common = &srq->common;
1101                 break;
1102         case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
1103         case MLX5_WQE_PF_TYPE_RESP:
1104         case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
1105                 common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
1106                 break;
1107         default:
1108                 break;
1109         }
1110
1111         return common;
1112 }
1113
1114 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
1115 {
1116         struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
1117
1118         return to_mibqp(mqp);
1119 }
1120
1121 static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
1122 {
1123         struct mlx5_core_srq *msrq =
1124                 container_of(res, struct mlx5_core_srq, common);
1125
1126         return to_mibsrq(msrq);
1127 }
1128
1129 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
1130                                           struct mlx5_pagefault *pfault)
1131 {
1132         bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
1133         u16 wqe_index = pfault->wqe.wqe_index;
1134         void *wqe, *wqe_start = NULL, *wqe_end = NULL;
1135         u32 bytes_mapped, total_wqe_bytes;
1136         struct mlx5_core_rsc_common *res;
1137         int resume_with_error = 1;
1138         struct mlx5_ib_qp *qp;
1139         size_t bytes_copied;
1140         int ret = 0;
1141
1142         res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1143         if (!res) {
1144                 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1145                 return;
1146         }
1147
1148         if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
1149             res->res != MLX5_RES_XSRQ) {
1150                 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
1151                             pfault->type);
1152                 goto resolve_page_fault;
1153         }
1154
1155         wqe_start = (void *)__get_free_page(GFP_KERNEL);
1156         if (!wqe_start) {
1157                 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
1158                 goto resolve_page_fault;
1159         }
1160
1161         wqe = wqe_start;
1162         qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
1163         if (qp && sq) {
1164                 ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
1165                                                &bytes_copied);
1166                 if (ret)
1167                         goto read_user;
1168                 ret = mlx5_ib_mr_initiator_pfault_handler(
1169                         dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
1170         } else if (qp && !sq) {
1171                 ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
1172                                                &bytes_copied);
1173                 if (ret)
1174                         goto read_user;
1175                 ret = mlx5_ib_mr_responder_pfault_handler_rq(
1176                         dev, qp, wqe, &wqe_end, bytes_copied);
1177         } else if (!qp) {
1178                 struct mlx5_ib_srq *srq = res_to_srq(res);
1179
1180                 ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
1181                                                 &bytes_copied);
1182                 if (ret)
1183                         goto read_user;
1184                 ret = mlx5_ib_mr_responder_pfault_handler_srq(
1185                         dev, srq, &wqe, &wqe_end, bytes_copied);
1186         }
1187
1188         if (ret < 0 || wqe >= wqe_end)
1189                 goto resolve_page_fault;
1190
1191         ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
1192                                       &total_wqe_bytes, !sq);
1193         if (ret == -EAGAIN)
1194                 goto out;
1195
1196         if (ret < 0 || total_wqe_bytes > bytes_mapped)
1197                 goto resolve_page_fault;
1198
1199 out:
1200         ret = 0;
1201         resume_with_error = 0;
1202
1203 read_user:
1204         if (ret)
1205                 mlx5_ib_err(
1206                         dev,
1207                         "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
1208                         ret, wqe_index, pfault->token);
1209
1210 resolve_page_fault:
1211         mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1212         mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
1213                     pfault->wqe.wq_num, resume_with_error,
1214                     pfault->type);
1215         mlx5_core_res_put(res);
1216         free_page((unsigned long)wqe_start);
1217 }
1218
1219 static int pages_in_range(u64 address, u32 length)
1220 {
1221         return (ALIGN(address + length, PAGE_SIZE) -
1222                 (address & PAGE_MASK)) >> PAGE_SHIFT;
1223 }
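
/*
 * Worked example of the arithmetic above (illustrative, assumes 4 KiB
 * pages): address 0x1ff0 with length 0x20 touches bytes 0x1ff0..0x200f, so
 * ALIGN(0x2010, 4K) = 0x3000, minus (0x1ff0 & PAGE_MASK) = 0x1000, gives
 * 0x2000, i.e. pages_in_range() returns 2 even though the length is far
 * below one page.
 */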
1224
1225 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
1226                                            struct mlx5_pagefault *pfault)
1227 {
1228         u64 address;
1229         u32 length;
1230         u32 prefetch_len = pfault->bytes_committed;
1231         int prefetch_activated = 0;
1232         u32 rkey = pfault->rdma.r_key;
1233         int ret;
1234
1235         /* The RDMA responder handler handles the page fault in two parts.
1236          * First it brings the necessary pages for the current packet
1237          * (and uses the pfault context), and then (after resuming the QP)
1238          * prefetches more pages. The second operation cannot reuse the pfault
1239          * context, so it uses a separate bytes_committed counter allocated on
1240          * the stack instead. */
1241         pfault->rdma.rdma_va += pfault->bytes_committed;
1242         pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1243                                          pfault->rdma.rdma_op_len);
1244         pfault->bytes_committed = 0;
1245
1246         address = pfault->rdma.rdma_va;
1247         length  = pfault->rdma.rdma_op_len;
1248
1249         /* For some operations, the hardware cannot tell the exact message
1250          * length, and in those cases it reports zero. Use prefetch
1251          * logic. */
1252         if (length == 0) {
1253                 prefetch_activated = 1;
1254                 length = pfault->rdma.packet_size;
1255                 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
1256         }
1257
1258         ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
1259                                             &pfault->bytes_committed, NULL,
1260                                             0);
1261         if (ret == -EAGAIN) {
1262                 /* We're racing with an invalidation, don't prefetch */
1263                 prefetch_activated = 0;
1264         } else if (ret < 0 || pages_in_range(address, length) > ret) {
1265                 mlx5_ib_page_fault_resume(dev, pfault, 1);
1266                 if (ret != -ENOENT)
1267                         mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
1268                                     ret, pfault->token, pfault->type);
1269                 return;
1270         }
1271
1272         mlx5_ib_page_fault_resume(dev, pfault, 0);
1273         mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
1274                     pfault->token, pfault->type,
1275                     prefetch_activated);
1276
1277         /* At this point, there might be a new pagefault already arriving in
1278          * the eq, so the pfault context must not be reused for the rest of the
1279          * processing. We're still OK with the objects being alive as the
1280          * work-queue is being fenced. */
1281
1282         if (prefetch_activated) {
1283                 u32 bytes_committed = 0;
1284
1285                 ret = pagefault_single_data_segment(dev, NULL, rkey, address,
1286                                                     prefetch_len,
1287                                                     &bytes_committed, NULL,
1288                                                     0);
1289                 if (ret < 0 && ret != -EAGAIN) {
1290                         mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
1291                                     ret, pfault->token, address, prefetch_len);
1292                 }
1293         }
1294 }
1295
1296 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1297 {
1298         u8 event_subtype = pfault->event_subtype;
1299
1300         switch (event_subtype) {
1301         case MLX5_PFAULT_SUBTYPE_WQE:
1302                 mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1303                 break;
1304         case MLX5_PFAULT_SUBTYPE_RDMA:
1305                 mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1306                 break;
1307         default:
1308                 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
1309                             event_subtype);
1310                 mlx5_ib_page_fault_resume(dev, pfault, 1);
1311         }
1312 }
1313
1314 static void mlx5_ib_eqe_pf_action(struct work_struct *work)
1315 {
1316         struct mlx5_pagefault *pfault = container_of(work,
1317                                                      struct mlx5_pagefault,
1318                                                      work);
1319         struct mlx5_ib_pf_eq *eq = pfault->eq;
1320
1321         mlx5_ib_pfault(eq->dev, pfault);
1322         mempool_free(pfault, eq->pool);
1323 }
1324
static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;
	int cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
		if (!pfault) {
			schedule_work(&eq->work);
			break;
		}

		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_ib_dbg(eq->dev,
			    "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			    eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
			pfault->type =
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
			pfault->token =
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
				MLX5_24BIT_MASK;
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				    pfault->type, pfault->token,
				    pfault->rdma.r_key);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				    pfault->rdma.rdma_op_len,
				    pfault->rdma.rdma_va);
			break;

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
			pfault->type =
				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
			pfault->token =
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
				MLX5_24BIT_MASK;
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				    pfault->type, pfault->token,
				    pfault->wqe.wq_num,
				    pfault->wqe.wqe_index);
			break;

		default:
			mlx5_ib_warn(eq->dev,
				     "Unsupported page fault event sub-type: 0x%02hhx\n",
				     eqe->sub_type);
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
		}

		pfault->eq = eq;
		INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
		queue_work(eq->wq, &pfault->work);

		cc = mlx5_eq_update_cc(eq->core, ++cc);
	}

	mlx5_eq_update_ci(eq->core, cc, 1);
}

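/*
 * Interrupt notifier for the ODP page-fault EQ. EQEs are consumed directly
 * from atomic context when eq->lock can be taken; otherwise draining is
 * deferred to eq->work so the interrupt handler never spins on the lock.
 */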
static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
			     void *data)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
	unsigned long flags;

	if (spin_trylock_irqsave(&eq->lock, flags)) {
		mlx5_ib_eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->lock, flags);
	} else {
		schedule_work(&eq->work);
	}

	return IRQ_HANDLED;
}

/* A generic mempool_refill() was proposed but unfortunately wasn't accepted:
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 * This is a cheap local workaround.
 */
static void mempool_refill(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}

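/*
 * Work handler for the page-fault EQ: top up the drain mempool (allocations
 * here may sleep) and then drain any EQEs that could not be handled from
 * interrupt context, this time holding eq->lock with interrupts disabled.
 */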
static void mlx5_ib_eq_pf_action(struct work_struct *work)
{
	struct mlx5_ib_pf_eq *eq =
		container_of(work, struct mlx5_ib_pf_eq, work);

	mempool_refill(eq->pool);

	spin_lock_irq(&eq->lock);
	mlx5_ib_eq_pf_process(eq);
	spin_unlock_irq(&eq->lock);
}

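/*
 * MLX5_IB_NUM_PF_EQE sizes the page-fault EQ itself; MLX5_IB_NUM_PF_DRAIN is
 * the number of mlx5_pagefault entries kept pre-allocated in the mempool so
 * that EQEs can still be drained when GFP_ATOMIC allocations fail.
 */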
enum {
	MLX5_IB_NUM_PF_EQE	= 0x1000,
	MLX5_IB_NUM_PF_DRAIN	= 64,
};

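/*
 * Create the per-device page-fault EQ: a drain mempool, a high-priority
 * workqueue for deferred fault resolution, and a generic EQ that listens only
 * for MLX5_EVENT_TYPE_PAGE_FAULT events, wired to mlx5_ib_eq_pf_int().
 */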
static int
mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
	struct mlx5_eq_param param = {};
	int err;

	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
	spin_lock_init(&eq->lock);
	eq->dev = dev;

	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
					       sizeof(struct mlx5_pagefault));
	if (!eq->pool)
		return -ENOMEM;

	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
				 MLX5_NUM_CMD_EQE);
	if (!eq->wq) {
		err = -ENOMEM;
		goto err_mempool;
	}

	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
	param = (struct mlx5_eq_param) {
		.irq_index = 0,
		.nent = MLX5_IB_NUM_PF_EQE,
	};
	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
	if (IS_ERR(eq->core)) {
		err = PTR_ERR(eq->core);
		goto err_wq;
	}
	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
	if (err) {
		mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
		goto err_eq;
	}

	return 0;
err_eq:
	mlx5_eq_destroy_generic(dev->mdev, eq->core);
err_wq:
	destroy_workqueue(eq->wq);
err_mempool:
	mempool_destroy(eq->pool);
	return err;
}

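/*
 * Tear down the page-fault EQ in reverse order: disable and destroy the EQ so
 * no new EQEs arrive, flush the deferred work, then release the workqueue and
 * the drain mempool.
 */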
static int
mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{
	int err;

	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
	cancel_work_sync(&eq->work);
	destroy_workqueue(eq->wq);
	mempool_destroy(eq->pool);

	return err;
}

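/*
 * Configure the MR cache entries used by implicit ODP: one entry of MTT-based
 * child MRs (MLX5_IMR_MTT_CACHE_ENTRY) and one entry of KSM-based parent MRs
 * (MLX5_IMR_KSM_CACHE_ENTRY). Both are allocated on demand only (limit = 0).
 */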
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
{
	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
		return;

	switch (ent->order - 2) {
	case MLX5_IMR_MTT_CACHE_ENTRY:
		ent->page = PAGE_SHIFT;
		ent->xlt = MLX5_IMR_MTT_ENTRIES *
			   sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		ent->limit = 0;
		break;

	case MLX5_IMR_KSM_CACHE_ENTRY:
		ent->page = MLX5_KSM_PAGE_SHIFT;
		ent->xlt = mlx5_imr_ksm_entries *
			   sizeof(struct mlx5_klm) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
		ent->limit = 0;
		break;
	}
}

static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
	.advise_mr = mlx5_ib_advise_mr,
	.invalidate_range = mlx5_ib_invalidate_range,
};

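/*
 * Per-device ODP initialization: register the ODP device ops, fetch the
 * device's null mkey when implicit ODP is supported (used for not-yet-present
 * entries of an implicit MR), and create the page-fault EQ.
 */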
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
	int ret = 0;

	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
		return ret;

	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
		if (ret) {
			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
			return ret;
		}
	}

	ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);

	return ret;
}

void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{
	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
		return;

	mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
}

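/*
 * Module-wide ODP initialization: compute how many KSM entries an implicit MR
 * needs to cover the whole task address space, given that each KSM entry maps
 * one child MR spanning 2^MLX5_IMR_MTT_BITS pages.
 */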
int mlx5_ib_odp_init(void)
{
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
				       MLX5_IMR_MTT_BITS);

	return 0;
}

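/*
 * Deferred prefetch request: a copy of the caller's SG list plus the PD and
 * page-fault flags, queued to system_unbound_wq by
 * mlx5_ib_advise_mr_prefetch().
 */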
struct prefetch_mr_work {
	struct work_struct work;
	struct ib_pd *pd;
	u32 pf_flags;
	u32 num_sge;
	struct ib_sge sg_list[];
};

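/*
 * Drop the per-MR prefetch references taken by num_pending_prefetch_inc() for
 * sg_list entries [from, num_sge), under the MR SRCU read lock.
 */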
static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
				     struct ib_sge *sg_list, u32 num_sge,
				     u32 from)
{
	u32 i;
	int srcu_key;

	srcu_key = srcu_read_lock(&dev->mr_srcu);

	for (i = from; i < num_sge; ++i) {
		struct mlx5_core_mkey *mmkey;
		struct mlx5_ib_mr *mr;

		mmkey = xa_load(&dev->mdev->priv.mkey_table,
				mlx5_base_mkey(sg_list[i].lkey));
		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
		atomic_dec(&mr->num_pending_prefetch);
	}

	srcu_read_unlock(&dev->mr_srcu, srcu_key);
}

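/*
 * Validate every SGE of a prefetch request: its lkey must resolve to a live
 * MR that belongs to the given PD. Each valid MR gets its
 * num_pending_prefetch count raised; on the first failure the references
 * already taken are dropped again and false is returned.
 */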
static bool num_pending_prefetch_inc(struct ib_pd *pd,
				     struct ib_sge *sg_list, u32 num_sge)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	bool ret = true;
	u32 i;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_core_mkey *mmkey;
		struct mlx5_ib_mr *mr;

		mmkey = xa_load(&dev->mdev->priv.mkey_table,
				mlx5_base_mkey(sg_list[i].lkey));
		if (!mmkey || mmkey->key != sg_list[i].lkey) {
			ret = false;
			break;
		}

		if (mmkey->type != MLX5_MKEY_MR) {
			ret = false;
			break;
		}

		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

		if (mr->ibmr.pd != pd) {
			ret = false;
			break;
		}

		if (!mr->live) {
			ret = false;
			break;
		}

		atomic_inc(&mr->num_pending_prefetch);
	}

	if (!ret)
		num_pending_prefetch_dec(dev, sg_list, i, 0);

	return ret;
}

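/*
 * Synchronously fault in each SGE by reusing the regular page-fault
 * resolution path; stops at the first error and returns it, 0 otherwise.
 */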
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
				    struct ib_sge *sg_list, u32 num_sge)
{
	u32 i;
	int ret = 0;
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	for (i = 0; i < num_sge; ++i) {
		struct ib_sge *sg = &sg_list[i];
		int bytes_committed = 0;

		ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
						    sg->length,
						    &bytes_committed, NULL,
						    pf_flags);
		if (ret < 0)
			break;
	}

	return ret < 0 ? ret : 0;
}

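/*
 * Work handler for deferred (non-flush) prefetch requests: prefetch only if
 * the device is still registered, then unconditionally drop the prefetch
 * references and free the work item.
 */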
static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
{
	struct prefetch_mr_work *w =
		container_of(work, struct prefetch_mr_work, work);

	if (ib_device_try_get(w->pd->device)) {
		mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
					 w->num_sge);
		ib_device_put(w->pd->device);
	}

	num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
				 w->num_sge, 0);
	kvfree(w);
}

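/*
 * Entry point for IB_UVERBS_ADVISE_MR prefetch requests. Flush-flagged
 * requests are resolved synchronously in the caller's context; all other
 * requests are validated, reference-counted and queued to system_unbound_wq.
 */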
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
	struct prefetch_mr_work *work;
	bool valid_req;
	int srcu_key;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;

	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
		return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
						num_sge);

	work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));

	/* It is guaranteed that the pd when the work is executed is the same
	 * pd as when the work was queued, since the pd can't be destroyed
	 * while it holds MRs and destroying an MR leads to flushing the
	 * workqueue.
	 */
	work->pd = pd;
	work->pf_flags = pf_flags;
	work->num_sge = num_sge;

	INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);

	srcu_key = srcu_read_lock(&dev->mr_srcu);

	valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
	if (valid_req)
		queue_work(system_unbound_wq, &work->work);
	else
		kvfree(work);

	srcu_read_unlock(&dev->mr_srcu, srcu_key);

	return valid_req ? 0 : -EINVAL;
}