/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
/*
 * We can't use an array for xlt_emergency_page because dma_map_single doesn't
 * work on kernel module memory.
 */
void *xlt_emergency_page;
static DEFINE_MUTEX(xlt_emergency_page_mutex);
enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate);
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
		MLX5_SET(mkc, mkc, relaxed_ordering_write,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
		MLX5_SET(mkc, mkc, relaxed_ordering_read,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
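
/*
 * Note: an mkey is a 24-bit index plus an 8-bit "variant" in its low byte.
 * assign_mkey_variant() below cycles the variant from a per-device atomic
 * counter, so an mkey index recycled from the cache is handed out with a
 * different key value, which helps catch use of stale mkeys.
 */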
static void assign_mkey_variant(struct mlx5_ib_dev *dev,
				struct mlx5_core_mkey *mkey, u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	mkey->key = key;
}

static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
			       struct mlx5_core_mkey *mkey, u32 *in,
			       int inlen)
{
	assign_mkey_variant(dev, mkey, in);
	return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
}
static int mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
				  struct mlx5_core_mkey *mkey,
				  struct mlx5_async_ctx *async_ctx,
				  u32 *in, int inlen, u32 *out, int outlen,
				  struct mlx5_async_work *context)
{
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	assign_mkey_variant(dev, mkey, in);
	return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
				create_mkey_callback, context);
}
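
/*
 * mlx5_ib_create_mkey_cb() issues CREATE_MKEY without blocking; the command
 * completion is routed to create_mkey_callback() below, which either links
 * the finished MR into its cache bucket or, on failure, sets fill_delay and
 * arms the delay timer to back off further asynchronous creations.
 */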
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

	return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_cache_ent *ent = mr->cache_ent;
	struct mlx5_ib_dev *dev = ent->dev;
	unsigned long flags;

	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		spin_lock_irqsave(&ent->lock, flags);
		ent->pending--;
		WRITE_ONCE(dev->fill_delay, 1);
		spin_unlock_irqrestore(&ent->lock, flags);
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.key |= mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mr->out, mkey_index));
	init_waitqueue_head(&mr->mmkey.wait);

	WRITE_ONCE(dev->cache.last_add, jiffies);

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	ent->total_mrs++;
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
}
static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
{
	struct mlx5_ib_mr *mr;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return NULL;
	mr->cache_ent = ent;

	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);

	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
	MLX5_SET(mkc, mkc, log_page_size, ent->page);
	return mr;
}
/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		mr = alloc_cache_mr(ent, mkc);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		spin_lock_irq(&ent->lock);
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			spin_unlock_irq(&ent->lock);
			kfree(mr);
			break;
		}
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
					     &ent->dev->async_ctx, in, inlen,
					     mr->out, sizeof(mr->out),
					     &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
/* Synchronously create an MR in the cache */
static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	mr = alloc_cache_mr(ent, mkc);
	if (!mr) {
		err = -ENOMEM;
		goto free_in;
	}

	err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto free_mr;

	mr->mmkey.type = MLX5_MKEY_MR;
	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
	spin_lock_irq(&ent->lock);
	ent->total_mrs++;
	spin_unlock_irq(&ent->lock);
	kfree(in);
	return mr;

free_mr:
	kfree(mr);
free_in:
	kfree(in);
	return ERR_PTR(err);
}
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_mr *mr;

	lockdep_assert_held(&ent->lock);
	if (list_empty(&ent->head))
		return;
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	list_del(&mr->list);
	ent->available_mrs--;
	ent->total_mrs--;
	spin_unlock_irq(&ent->lock);
	mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
	kfree(mr);
	spin_lock_irq(&ent->lock);
}
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
{
	int err;

	lockdep_assert_held(&ent->lock);

	while (true) {
		if (limit_fill)
			target = ent->limit * 2;
		if (target == ent->available_mrs + ent->pending)
			return 0;
		if (target > ent->available_mrs + ent->pending) {
			u32 todo = target - (ent->available_mrs + ent->pending);

			spin_unlock_irq(&ent->lock);
			err = add_keys(ent, todo);
			if (err == -EAGAIN)
				usleep_range(3000, 5000);
			spin_lock_irq(&ent->lock);
			if (err) {
				if (err != -EAGAIN)
					return err;
			} else
				return 0;
		} else
			remove_cache_mr_locked(ent);
	}
}
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 target;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &target);
	if (err)
		return err;

	/*
	 * Target is the new value of total_mrs the user requests, however we
	 * cannot free MRs that are in use. Compute the target value for
	 * available_mrs.
	 */
	spin_lock_irq(&ent->lock);
	if (target < ent->total_mrs - ent->available_mrs) {
		err = -EINVAL;
		goto err_unlock;
	}
	target = target - (ent->total_mrs - ent->available_mrs);
	if (target < ent->limit || target > ent->limit * 2) {
		err = -EINVAL;
		goto err_unlock;
	}
	err = resize_available_mrs(ent, target, false);
	if (err)
		goto err_unlock;
	spin_unlock_irq(&ent->lock);

	return count;

err_unlock:
	spin_unlock_irq(&ent->lock);
	return err;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = size_write,
	.read = size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 var;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/*
	 * Upon set we immediately fill the cache to the high water mark
	 * implied by the limit.
	 */
	spin_lock_irq(&ent->lock);
	ent->limit = var;
	err = resize_available_mrs(ent, 0, true);
	spin_unlock_irq(&ent->lock);
	if (err)
		return err;
	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = limit_write,
	.read = limit_read,
};
static bool someone_adding(struct mlx5_mr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &cache->ent[i];
		bool ret;

		spin_lock_irq(&ent->lock);
		ret = ent->available_mrs < ent->limit;
		spin_unlock_irq(&ent->lock);
		if (ret)
			return true;
	}
	return false;
}
/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis: once the low water mark is hit, it
 * is refilled up to the high mark.
 */
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
	lockdep_assert_held(&ent->lock);

	if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
		return;
	if (ent->available_mrs < ent->limit) {
		ent->fill_to_high_water = true;
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->fill_to_high_water &&
		   ent->available_mrs + ent->pending < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark,
		 * continue until we pass the high water mark.
		 */
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->available_mrs == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->pending)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			queue_work(ent->dev->cache.wq, &ent->work);
	}
}
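
/*
 * A worked example of the hysteresis above, assuming limit == 500: refill
 * kicks in once available_mrs drops below 500 and continues until
 * available_mrs + pending reaches 1000 (2 * limit). Shrinking starts only
 * above 1000 and is deferred by a second while creations are still pending,
 * so short-lived spikes in MR churn do not make the pool thrash.
 */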
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int err;

	spin_lock_irq(&ent->lock);
	if (ent->disabled)
		goto out;

	if (ent->fill_to_high_water &&
	    ent->available_mrs + ent->pending < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		spin_unlock_irq(&ent->lock);
		err = add_keys(ent, 1);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (err) {
			/*
			 * EAGAIN only happens if pending is positive, so we
			 * will be rescheduled from create_mkey_callback(). The
			 * only failure path here is ENOMEM.
			 */
			if (err != -EAGAIN) {
				mlx5_ib_warn(
					dev,
					"command failed order %d, err %d\n",
					ent->order, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			}
		}
	} else if (ent->available_mrs > 2 * ent->limit) {
		bool need_delay;

		/*
		 * The remove_cache_mr_locked() logic is performed as a
		 * garbage collection task. Such a task is intended to be run
		 * when no other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_cache_mr_locked()
		 * and postpone the garbage collection work, trying to run it
		 * in the next cycle in order to free CPU resources for other
		 * tasks.
		 */
		spin_unlock_irq(&ent->lock);
		need_delay = need_resched() || someone_adding(cache) ||
			     time_after(jiffies,
					READ_ONCE(cache->last_add) + 300 * HZ);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (need_delay)
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		remove_cache_mr_locked(ent);
		queue_adjust_cache_locked(ent);
	}
out:
	spin_unlock_irq(&ent->lock);
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry, int access_flags)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;

	if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
		    entry >= ARRAY_SIZE(cache->ent)))
		return ERR_PTR(-EINVAL);

	/* Matches access in alloc_cache_mr() */
	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
		return ERR_PTR(-EOPNOTSUPP);

	ent = &cache->ent[entry];
	spin_lock_irq(&ent->lock);
	if (list_empty(&ent->head)) {
		spin_unlock_irq(&ent->lock);
		mr = create_cache_mr(ent);
		if (IS_ERR(mr))
			return mr;
	} else {
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->available_mrs--;
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}
	mr->access_flags = access_flags;
	return mr;
}
/* Return an MR already available in the cache */
static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
	struct mlx5_ib_dev *dev = req_ent->dev;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent = req_ent;

	/* Try larger MR pools from the cache to satisfy the allocation */
	for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
		mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
			    ent - dev->cache.ent);
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->available_mrs--;
			queue_adjust_cache_locked(ent);
			spin_unlock_irq(&ent->lock);
			break;
		}
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	if (!mr)
		req_ent->miss++;

	return mr;
}
static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_cache_ent *ent = mr->cache_ent;

	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	queue_adjust_cache_locked(ent);
	spin_unlock_irq(&ent->lock);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->available_mrs--;
		ent->total_mrs--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}
static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	WRITE_ONCE(dev->fill_delay, 0);
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
		    mlx5_ib_can_load_pas_with_umr(dev, 0))
			ent->limit = dev->mdev->profile.mr_cache[i].limit;
		else
			ent->limit = 0;
		spin_lock_irq(&ent->lock);
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	if (!dev->cache.wq)
		return 0;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &dev->cache.ent[i];

		spin_lock_irq(&ent->lock);
		ent->disabled = true;
		spin_unlock_irq(&ent->lock);
		cancel_work_sync(&ent->work);
		cancel_delayed_work_sync(&ent->dwork);
	}

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return &mr->ibmr;

err_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}
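
/*
 * Example: len = 1MiB at addr = 0x1800 with 4KiB pages gives offset = 0x800,
 * so ALIGN(0x100800, 0x1000) >> 12 = 257 pages, which pack into
 * (257 + 1) / 2 = 129 octowords (two 8-byte MTT entries fit in one 16-byte
 * octoword).
 */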
static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}
static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
						      unsigned int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return &cache->ent[0];
	order = order - cache->ent[0].order;
	if (order > MR_CACHE_LAST_STD_ENTRY)
		return NULL;
	return &cache->ent[order];
}
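
/*
 * Example: the standard buckets hold MRs of order ent[0].order + index
 * (order 2 at index 0, as mlx5_mr_cache_init() above sets ent->order = i + 2),
 * so a request for order 5 maps to bucket 3. Requests smaller than the first
 * bucket round up to it, and anything past MR_CACHE_LAST_STD_ENTRY returns
 * NULL so the caller falls back to an uncached MR.
 */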
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  u64 length, int access_flags)
{
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->ibmr.device = &dev->ib_dev;
	mr->access_flags = access_flags;
}
static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
						  u64 iova)
{
	/*
	 * The alignment of iova has already been checked upon entering
	 * UVERBS_METHOD_REG_DMABUF_MR.
	 */
	umem->iova = iova;
	return PAGE_SIZE;
}
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
					     struct ib_umem *umem, u64 iova,
					     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	unsigned int page_size;

	if (umem->is_dmabuf)
		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
	else
		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
						     0, iova);
	if (WARN_ON(!page_size))
		return ERR_PTR(-EINVAL);
	ent = mr_cache_ent_from_order(
		dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
	/*
	 * Matches access in alloc_cache_mr(). If the MR can't come from the
	 * cache then synchronously create an uncached one.
	 */
	if (!ent || ent->limit == 0 ||
	    !mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags)) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, false);
		mutex_unlock(&dev->slow_path_mutex);
		return mr;
	}

	mr = get_cache_mr(ent);
	if (!mr) {
		mr = create_cache_mr(ent);
		/*
		 * The above already tried to do the same stuff as reg_create(),
		 * no reason to try it again.
		 */
		if (IS_ERR(mr))
			return mr;
	}

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->mmkey.iova = iova;
	mr->mmkey.size = umem->length;
	mr->mmkey.pd = to_mpd(pd)->pdn;
	mr->page_shift = order_base_2(page_size);
	set_mr_fields(dev, mr, umem->length, access_flags);

	return mr;
}
#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
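
/*
 * MLX5_MAX_UMR_CHUNK is the largest XLT buffer a single UMR can carry (just
 * under 1MiB of translation entries), while MLX5_SPARE_UMR_CHUNK (64KiB) is
 * the smaller size tried when the big allocation fails; see the fallback
 * chain in mlx5_ib_alloc_xlt() below.
 */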
/*
 * Allocate a temporary buffer to hold the per-page information to transfer to
 * HW. For efficiency this should be as large as it can be, but buffer
 * allocation failure is not allowed, so try smaller sizes.
 */
static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
	const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
	size_t size;
	void *res = NULL;

	static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);

	/*
	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that
	 * the allocation can't trigger any kind of reclaim.
	 */
	might_sleep();

	gfp_mask |= __GFP_ZERO | __GFP_NORETRY;

	/*
	 * If the system already has a suitable high order page then just use
	 * that, but don't try hard to create one. This max is about 1M, so a
	 * free x86 huge page will satisfy it.
	 */
	size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
		     MLX5_MAX_UMR_CHUNK);
	*nents = size / ent_size;
	res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
				       get_order(size));
	if (res)
		return res;

	if (size > MLX5_SPARE_UMR_CHUNK) {
		size = MLX5_SPARE_UMR_CHUNK;
		*nents = size / ent_size;
		res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
					       get_order(size));
		if (res)
			return res;
	}

	*nents = PAGE_SIZE / ent_size;
	res = (void *)__get_free_page(gfp_mask);
	if (res)
		return res;

	mutex_lock(&xlt_emergency_page_mutex);
	memset(xlt_emergency_page, 0, PAGE_SIZE);
	return xlt_emergency_page;
}
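
/*
 * To summarize the fallback chain above: try one large high-order allocation
 * (capped at MLX5_MAX_UMR_CHUNK), then a MLX5_SPARE_UMR_CHUNK allocation,
 * then a single page, and finally fall back to the statically allocated
 * xlt_emergency_page guarded by its mutex, so the update path can always make
 * forward progress even under severe memory pressure.
 */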
static void mlx5_ib_free_xlt(void *xlt, size_t length)
{
	if (xlt == xlt_emergency_page) {
		mutex_unlock(&xlt_emergency_page_mutex);
		return;
	}

	free_pages((unsigned long)xlt, get_order(length));
}
/*
 * Create a MLX5_IB_SEND_UMR_UPDATE_XLT work request and XLT buffer ready for
 * submission.
 */
static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
				   struct mlx5_umr_wr *wr, struct ib_sge *sg,
				   size_t nents, size_t ent_size,
				   unsigned int flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
	dma_addr_t dma;
	void *xlt;

	xlt = mlx5_ib_alloc_xlt(&nents, ent_size,
				flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
								 GFP_KERNEL);
	sg->length = nents * ent_size;
	dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		mlx5_ib_free_xlt(xlt, sg->length);
		return NULL;
	}
	sg->addr = dma;
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	memset(wr, 0, sizeof(*wr));
	wr->wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr->wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->wr.sg_list = sg;
	wr->wr.num_sge = 1;
	wr->wr.opcode = MLX5_IB_WR_UMR;
	wr->pd = mr->ibmr.pd;
	wr->mkey = mr->mmkey.key;
	wr->length = mr->mmkey.size;
	wr->virt_addr = mr->mmkey.iova;
	wr->access_flags = mr->access_flags;
	wr->page_shift = mr->page_shift;
	wr->xlt_size = sg->length;
	return xlt;
}
static void mlx5_ib_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
				   struct ib_sge *sg)
{
	struct device *ddev = &dev->mdev->pdev->dev;

	dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
	mlx5_ib_free_xlt(xlt, sg->length);
}
static unsigned int xlt_wr_final_send_flags(unsigned int flags)
{
	unsigned int res = 0;

	if (flags & MLX5_IB_UPD_XLT_ENABLE)
		res |= MLX5_IB_SEND_UMR_ENABLE_MR |
		       MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
		       MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	if (flags & MLX5_IB_UPD_XLT_PD || flags & MLX5_IB_UPD_XLT_ACCESS)
		res |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	if (flags & MLX5_IB_UPD_XLT_ADDR)
		res |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	return res;
}
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
	void *xlt;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter;
	size_t size_to_map = 0;
	size_t orig_sg_length;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	if (WARN_ON(!mr->umem->is_odp))
		return -EINVAL;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}
	pages_to_map = ALIGN(npages, page_align);

	xlt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, npages, desc_size, flags);
	if (!xlt)
		return -ENOMEM;
	pages_iter = sg.length / desc_size;
	orig_sg_length = sg.length;

	if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
		struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
		size_t max_pages = ib_umem_odp_num_pages(odp) - idx;

		pages_to_map = min_t(size_t, pages_to_map, max_pages);
	}

	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		size_to_map = npages * desc_size;
		dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
					DMA_TO_DEVICE);
		mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
		dma_sync_single_for_device(ddev, sg.addr, sg.length,
					   DMA_TO_DEVICE);

		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map)
			wr.wr.send_flags |= xlt_wr_final_send_flags(flags);

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	sg.length = orig_sg_length;
	mlx5_ib_unmap_free_xlt(dev, xlt, &sg);
	return err;
}
/*
 * Send the DMA list to the HW for a normal MR using UMR.
 * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
 * flag is used to zero the MTT entries of an invalidated mapping instead of
 * populating them.
 */
int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
	struct ib_block_iter biter;
	struct mlx5_mtt *cur_mtt;
	struct mlx5_umr_wr wr;
	size_t orig_sg_length;
	struct mlx5_mtt *mtt;
	size_t final_size;
	struct ib_sge sg;
	int err = 0;

	if (WARN_ON(mr->umem->is_odp))
		return -EINVAL;

	mtt = mlx5_ib_create_xlt_wr(mr, &wr, &sg,
				    ib_umem_num_dma_blocks(mr->umem,
							   1 << mr->page_shift),
				    sizeof(*mtt), flags);
	if (!mtt)
		return -ENOMEM;
	orig_sg_length = sg.length;

	cur_mtt = mtt;
	rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
			     BIT(mr->page_shift)) {
		if (cur_mtt == (void *)mtt + sg.length) {
			dma_sync_single_for_device(ddev, sg.addr, sg.length,
						   DMA_TO_DEVICE);
			err = mlx5_ib_post_send_wait(dev, &wr);
			if (err)
				goto err;
			dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
						DMA_TO_DEVICE);
			wr.offset += sg.length;
			cur_mtt = mtt;
		}

		cur_mtt->ptag =
			cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				    MLX5_IB_MTT_PRESENT);

		if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
			cur_mtt->ptag = 0;

		cur_mtt++;
	}

	final_size = (void *)cur_mtt - (void *)mtt;
	sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
	memset(cur_mtt, 0, sg.length - final_size);
	wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
	wr.xlt_size = sg.length;

	dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
	err = mlx5_ib_post_send_wait(dev, &wr);

err:
	sg.length = orig_sg_length;
	mlx5_ib_unmap_free_xlt(dev, mtt, &sg);
	return err;
}
/*
 * reg_create() creates the mkey with a FW command. If @populate is true the
 * PAS list is written into the mkey creation command; otherwise the mkey is
 * created in the "free" state and is expected to be populated later via UMR.
 */
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	if (!page_size)
		return ERR_PTR(-EINVAL);
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;
	mr->page_shift = order_base_2(page_size);

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) *
			 roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate) {
		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
			err = -EINVAL;
			goto err_2;
		}
		mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
	}

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
				      populate ? pd : dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, len, umem->length);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(iova, umem->length, mr->page_shift));
	MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(iova, umem->length, mr->page_shift));
	}

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->umem = umem;
	set_mr_fields(dev, mr, umem->length, access_flags);
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);
err_1:
	kfree(mr);
	return ERR_PTR(err);
}
static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET64(mkc, mkc, len, length);
	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	set_mr_fields(dev, mr, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}
static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
				    u64 iova, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool xlt_with_umr;
	int err;

	xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, umem->length);
	if (xlt_with_umr) {
		mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
	} else {
		unsigned int page_size = mlx5_umem_find_best_pgsz(
			umem, mkc, log_page_size, 0, iova);

		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(pd, umem, iova, access_flags, page_size, true);
		mutex_unlock(&dev->slow_path_mutex);
	}
	if (IS_ERR(mr)) {
		ib_umem_release(umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

	if (xlt_with_umr) {
		/*
		 * If the MR was created with reg_create then it will be
		 * configured properly but left disabled. It is safe to go ahead
		 * and configure it again via UMR while enabling it.
		 */
		err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
		if (err) {
			mlx5_ib_dereg_mr(&mr->ibmr, NULL);
			return ERR_PTR(err);
		}
	}
	return &mr->ibmr;
}
static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 iova, int access_flags,
					struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
	if (err)
		return ERR_PTR(err);
	if (!start && length == U64_MAX) {
		if (iova != 0)
			return ERR_PTR(-EINVAL);
		if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	/* ODP requires xlt update via umr to work. */
	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
		return ERR_PTR(-EINVAL);

	odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
			      &mlx5_mn_ops);
	if (IS_ERR(odp))
		return ERR_CAST(odp);

	mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&odp->umem);
		return ERR_CAST(mr);
	}

	odp->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_odp_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, iova, length, access_flags);

	if (access_flags & IB_ACCESS_ON_DEMAND)
		return create_user_odp_mr(pd, start, length, iova, access_flags,
					  udata);
	umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
	if (IS_ERR(umem))
		return ERR_CAST(umem);
	return create_real_mr(pd, umem, iova, access_flags);
}
static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
	struct mlx5_ib_mr *mr = umem_dmabuf->private;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
}

static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
	.allow_peer2peer = 1,
	.move_notify = mlx5_ib_dmabuf_invalidate_cb,
};
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
	    !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev,
		    "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
		    offset, virt_addr, length, fd, access_flags);

	/* dmabuf requires xlt update via umr to work. */
	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
		return ERR_PTR(-EINVAL);

	umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
					 access_flags,
					 &mlx5_ib_dmabuf_attach_ops);
	if (IS_ERR(umem_dmabuf)) {
		mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
			    PTR_ERR(umem_dmabuf));
		return ERR_CAST(umem_dmabuf);
	}

	mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
				access_flags);
	if (IS_ERR(mr)) {
		ib_umem_release(&umem_dmabuf->umem);
		return ERR_CAST(mr);
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
	umem_dmabuf->private = mr;
	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
	if (err)
		goto err_dereg_mr;

	err = mlx5_ib_init_dmabuf_mr(mr);
	if (err)
		goto err_dereg_mr;
	return &mr->ibmr;

err_dereg_mr:
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
	return ERR_PTR(err);
}
/**
 * revoke_mr - Fence all DMA on the MR
 * @mr: The MR to fence
 *
 * Upon return the NIC will not be doing any DMA to the pages under the MR,
 * and any DMA in progress will be completed. Failure of this function
 * indicates the HW has failed catastrophically.
 */
static int revoke_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_umr_wr umrwr = {};

	if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.pd = mr_to_mdev(mr)->umrc.pd;
	umrwr.mkey = mr->mmkey.key;
	umrwr.ignore_free_state = 1;

	return mlx5_ib_post_send_wait(mr_to_mdev(mr), &umrwr);
}
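
/*
 * Note: revoke_mr() leaves the mkey disabled, i.e. in the "free" state (hence
 * ignore_free_state on the WQE). The callers below rely on it as a DMA fence
 * before changing the PD, access flags or translation of a live MR, or before
 * returning the MR to the cache.
 */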
/*
 * True if the change in access flags can be done via UMR; only some access
 * flags can be updated.
 */
static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
				     unsigned int current_access_flags,
				     unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
		return false;
	return mlx5_ib_can_reconfig_with_umr(dev, current_access_flags,
					     target_access_flags);
}
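
/*
 * For example, toggling IB_ACCESS_REMOTE_WRITE on an existing MR can be done
 * with a UMR, while adding IB_ACCESS_ON_DEMAND cannot: any diff outside the
 * four flags masked above forces a full re-registration.
 */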
static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			       int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	struct mlx5_umr_wr umrwr = {
		.wr = {
			.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS,
			.opcode = MLX5_IB_WR_UMR,
		},
		.mkey = mr->mmkey.key,
		.pd = pd,
		.access_flags = access_flags,
	};
	int err;

	err = mlx5_ib_post_send_wait(dev, &umrwr);
	if (err)
		return err;

	mr->access_flags = access_flags;
	mr->mmkey.pd = to_mpd(pd)->pdn;
	return 0;
}
static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
				  struct ib_umem *new_umem,
				  int new_access_flags, u64 iova,
				  unsigned long *page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);

	/* We only track the allocated sizes of MRs from the cache */
	if (!mr->cache_ent)
		return false;
	if (!mlx5_ib_can_load_pas_with_umr(dev, new_umem->length))
		return false;

	*page_size =
		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
	if (WARN_ON(!*page_size))
		return false;
	return (1ULL << mr->cache_ent->order) >=
	       ib_umem_num_dma_blocks(new_umem, *page_size);
}
static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			 int access_flags, int flags, struct ib_umem *new_umem,
			 u64 iova, unsigned long page_size)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
	struct ib_umem *old_umem = mr->umem;
	int err;

	/*
	 * To keep everything simple the MR is revoked before we start to mess
	 * with it. This ensures the change is atomic relative to any use of
	 * the MR.
	 */
	err = revoke_mr(mr);
	if (err)
		return err;

	if (flags & IB_MR_REREG_PD) {
		mr->ibmr.pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
		upd_flags |= MLX5_IB_UPD_XLT_PD;
	}
	if (flags & IB_MR_REREG_ACCESS) {
		mr->access_flags = access_flags;
		upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
	}

	mr->ibmr.length = new_umem->length;
	mr->mmkey.iova = iova;
	mr->mmkey.size = new_umem->length;
	mr->page_shift = order_base_2(page_size);
	mr->umem = new_umem;
	err = mlx5_ib_update_mr_pas(mr, upd_flags);
	if (err) {
		/*
		 * The MR is revoked at this point so there is no issue to
		 * free new_umem.
		 */
		mr->umem = old_umem;
		return err;
	}

	atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
	ib_umem_release(old_umem);
	atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
	return 0;
}
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 iova, int new_access_flags,
				    struct ib_pd *new_pd,
				    struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(
		dev,
		"start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		start, iova, length, new_access_flags);

	if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
		return ERR_PTR(-EOPNOTSUPP);

	if (!(flags & IB_MR_REREG_ACCESS))
		new_access_flags = mr->access_flags;
	if (!(flags & IB_MR_REREG_PD))
		new_pd = ib_mr->pd;

	if (!(flags & IB_MR_REREG_TRANS)) {
		struct ib_umem *umem;

		/* Fast path for PD/access change */
		if (can_use_umr_rereg_access(dev, mr->access_flags,
					     new_access_flags)) {
			err = umr_rereg_pd_access(mr, new_pd, new_access_flags);
			if (err)
				return ERR_PTR(err);
			return NULL;
		}
		/* DM or ODP MRs don't have a normal umem so we can't re-use it */
		if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
			goto recreate;

		/*
		 * Only one active MR can refer to a umem at one time; revoke
		 * the old MR before assigning the umem to the new one.
		 */
		err = revoke_mr(mr);
		if (err)
			return ERR_PTR(err);
		umem = mr->umem;
		mr->umem = NULL;
		atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);

		return create_real_mr(new_pd, umem, mr->mmkey.iova,
				      new_access_flags);
	}

	/*
	 * DM doesn't have a PAS list, so we can't re-use it; odp/dmabuf do,
	 * but the logic around releasing the umem is different.
	 */
	if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
		goto recreate;

	if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
	    can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
		struct ib_umem *new_umem;
		unsigned long page_size;

		new_umem = ib_umem_get(&dev->ib_dev, start, length,
				       new_access_flags);
		if (IS_ERR(new_umem))
			return ERR_CAST(new_umem);

		/* Fast path for PAS change */
		if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
					  &page_size)) {
			err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
					    new_umem, iova, page_size);
			if (err) {
				ib_umem_release(new_umem);
				return ERR_PTR(err);
			}
			return NULL;
		}
		return create_real_mr(new_pd, new_umem, iova, new_access_flags);
	}

	/*
	 * Everything else has no state we can preserve; just create a new MR
	 * from scratch.
	 */
recreate:
	return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
				   new_access_flags, udata);
}
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct device *ddev = &dev->mdev->pdev->dev;
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (!mr->umem && mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;
		struct mlx5_ib_dev *dev = to_mdev(device);

		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
				 DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	int rc;

	/*
	 * Any async use of the mr must hold the refcount; once the refcount
	 * goes to zero no other thread, such as ODP page faults, prefetch, or
	 * any UMR activity, can touch the mkey. Thus it is safe to destroy it.
	 */
	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    refcount_read(&mr->mmkey.usecount) != 0 &&
	    xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
		mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), ibmr,
			   NULL, GFP_KERNEL);

		if (mr->mtt_mr) {
			rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->mtt_mr = NULL;
		}
		if (mr->klm_mr) {
			rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
			if (rc)
				return rc;
			mr->klm_mr = NULL;
		}

		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	/* Stop DMA */
	if (mr->cache_ent) {
		if (revoke_mr(mr)) {
			spin_lock_irq(&mr->cache_ent->lock);
			mr->cache_ent->total_mrs--;
			spin_unlock_irq(&mr->cache_ent->lock);
			mr->cache_ent = NULL;
		}
	}
	if (!mr->cache_ent) {
		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
		if (rc)
			return rc;
	}

	if (mr->umem) {
		bool is_odp = is_odp_mr(mr);

		if (!is_odp)
			atomic_sub(ib_umem_num_pages(mr->umem),
				   &dev->mdev->priv.reg_pages);
		ib_umem_release(mr->umem);
		if (is_odp)
			mlx5_ib_free_odp_mr(mr);
	}

	if (mr->cache_ent) {
		mlx5_mr_cache_free(dev, mr);
	} else {
		mlx5_free_priv_descs(mr);
		kfree(mr);
	}
	return 0;
}
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	/* This is only used from the kernel, so setting the PD is OK. */
	set_mkc_access_pd_addr_fields(mkc, 0, 0, pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}
static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			      mr->sig, GFP_KERNEL));
	if (err)
		goto err_free_descs;
	return 0;

err_free_descs:
	destroy_mkey(dev, mr);
	mlx5_free_priv_descs(mr);
err_free_mtt_mr:
	mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);
	mr->sig = NULL;

	return err;
}
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}
int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = to_mmw(ibmw);
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32 comp_mask;
		__u32 response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return -EOPNOTSUPP;

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!(ibmw->type == IB_MW_TYPE_2));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	ibmw->rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length =
		min(offsetofend(typeof(resp), response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto free_mkey;
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
		if (err)
			goto free_mkey;
	}

	kfree(in);
	return 0;

free_mkey:
	mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
	kfree(in);
	return err;
}
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_dev *dev = to_mdev(mw->device);
	struct mlx5_ib_mw *mmw = to_mmw(mw);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
	    xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
		/*
		 * pagefault_single_data_segment() may be accessing mmw
		 * if the user bound an ODP MR to this MW.
		 */
		mlx5r_deref_wait_odp_mkey(&mmw->mmkey);

	return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * PI address for the HW is the offset of the metadata address
		 * relative to the first data page address.
		 * It equals the first data page address + size of data pages +
		 * metadata offset at the first metadata page.
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we also
		 * register the gaps between the end of the data and the start
		 * of the metadata (the sig MR will verify that the HW accesses
		 * the right addresses). This mapping is safe because we use an
		 * internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
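
/*
 * Worked example for the pi_iova computation above, assuming 4KiB pages: for
 * 8KiB of data at iova 0x10000 and metadata starting 0x200 into its first
 * page, pi_iova = 0x10000 + 2 * 0x1000 + 0x200 = 0x12200, and the MR length
 * covers everything from 0x10000 up to the end of the metadata.
 */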
static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is a zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform a UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fall back to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to
	 * MTT descriptors and fall back to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially under high load). Use KLM (indirect access) only if
	 * it's mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is a zero-based memory region */
	if (pi_mr) {
		ibmr->iova = 0;
		mr->pi_mr = pi_mr;
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	} else {
		mr->pi_mr = NULL;
		ibmr->sig_attrs->meta_length = mr->meta_length;
	}

	return 0;
}
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}