/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "dm.h"
#include "mlx5_ib.h"
/*
 * We can't use an array for xlt_emergency_page because dma_map_single doesn't
 * work on kernel module memory.
 */
void *xlt_emergency_page;

static DEFINE_MUTEX(xlt_emergency_page_mutex);
enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
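/*
 * mlx5_alloc_priv_descs() below over-allocates the private descriptor buffer
 * and PTR_ALIGN()s it to this boundary; the 2KB value is assumed here to be a
 * hardware alignment requirement for UMR-accessed descriptor lists.
 */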
static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate);
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if ((acc & IB_ACCESS_RELAXED_ORDERING) &&
	    pcie_relaxed_ordering_enabled(dev->mdev->pdev)) {
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
			MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
			MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
	}

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
static void assign_mkey_variant(struct mlx5_ib_dev *dev,
				struct mlx5_ib_mkey *mkey, u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	mkey->key = key;
}
102 static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
103 struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
107 assign_mkey_variant(dev, mkey, in);
108 ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
110 init_waitqueue_head(&mkey->wait);
116 mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
117 struct mlx5_ib_mkey *mkey,
118 struct mlx5_async_ctx *async_ctx,
119 u32 *in, int inlen, u32 *out, int outlen,
120 struct mlx5_async_work *context)
122 MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
123 assign_mkey_variant(dev, mkey, in);
124 return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
125 create_mkey_callback, context);
128 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
129 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
131 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
133 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
136 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
138 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
140 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
143 static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
145 if (status == -ENXIO) /* core driver is not available */
148 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
149 if (status != -EREMOTEIO) /* driver specific failure */
152 /* Failed in FW, print cmd out failure details */
153 mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
156 static void create_mkey_callback(int status, struct mlx5_async_work *context)
158 struct mlx5_ib_mr *mr =
159 container_of(context, struct mlx5_ib_mr, cb_work);
160 struct mlx5_cache_ent *ent = mr->cache_ent;
161 struct mlx5_ib_dev *dev = ent->dev;
165 create_mkey_warn(dev, status, mr->out);
167 spin_lock_irqsave(&ent->lock, flags);
169 WRITE_ONCE(dev->fill_delay, 1);
170 spin_unlock_irqrestore(&ent->lock, flags);
171 mod_timer(&dev->delay_timer, jiffies + HZ);
175 mr->mmkey.type = MLX5_MKEY_MR;
176 mr->mmkey.key |= mlx5_idx_to_mkey(
177 MLX5_GET(create_mkey_out, mr->out, mkey_index));
178 init_waitqueue_head(&mr->mmkey.wait);
180 WRITE_ONCE(dev->cache.last_add, jiffies);
182 spin_lock_irqsave(&ent->lock, flags);
183 list_add_tail(&mr->list, &ent->head);
184 ent->available_mrs++;
186 /* If we are doing fill_to_high_water then keep going. */
187 queue_adjust_cache_locked(ent);
189 spin_unlock_irqrestore(&ent->lock, flags);
static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
{
	int ret = 0;

	switch (access_mode) {
	case MLX5_MKC_ACCESS_MODE_MTT:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / sizeof(struct mlx5_mtt));
		break;
	case MLX5_MKC_ACCESS_MODE_KSM:
		ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD / sizeof(struct mlx5_klm));
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}
211 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
213 struct mlx5_ib_mr *mr;
215 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
220 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
221 MLX5_SET(mkc, mkc, free, 1);
222 MLX5_SET(mkc, mkc, umr_en, 1);
223 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
224 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
226 MLX5_SET(mkc, mkc, translations_octword_size,
227 get_mkc_octo_size(ent->access_mode, ent->ndescs));
228 MLX5_SET(mkc, mkc, log_page_size, ent->page);
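/*
 * Cache mkeys above are created in the "free" state with umr_en set, so a
 * later registration can take one off the free list and configure its
 * translation and access bits through UMR rather than issuing another
 * CREATE_MKEY command (see alloc_cacheable_mr() and mlx5_ib_update_mr_pas()
 * below).
 */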
232 /* Asynchronously schedule new MRs to be populated in the cache. */
233 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
235 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
236 struct mlx5_ib_mr *mr;
242 in = kzalloc(inlen, GFP_KERNEL);
246 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
247 for (i = 0; i < num; i++) {
248 mr = alloc_cache_mr(ent, mkc);
253 spin_lock_irq(&ent->lock);
254 if (ent->pending >= MAX_PENDING_REG_MR) {
256 spin_unlock_irq(&ent->lock);
261 spin_unlock_irq(&ent->lock);
262 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
263 &ent->dev->async_ctx, in, inlen,
264 mr->out, sizeof(mr->out),
267 spin_lock_irq(&ent->lock);
269 spin_unlock_irq(&ent->lock);
270 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
280 /* Synchronously create a MR in the cache */
281 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
283 size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
284 struct mlx5_ib_mr *mr;
289 in = kzalloc(inlen, GFP_KERNEL);
291 return ERR_PTR(-ENOMEM);
292 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
294 mr = alloc_cache_mr(ent, mkc);
300 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
304 init_waitqueue_head(&mr->mmkey.wait);
305 mr->mmkey.type = MLX5_MKEY_MR;
306 WRITE_ONCE(ent->dev->cache.last_add, jiffies);
307 spin_lock_irq(&ent->lock);
309 spin_unlock_irq(&ent->lock);
319 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
321 struct mlx5_ib_mr *mr;
323 lockdep_assert_held(&ent->lock);
324 if (list_empty(&ent->head))
326 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
328 ent->available_mrs--;
330 spin_unlock_irq(&ent->lock);
331 mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
333 spin_lock_irq(&ent->lock);
336 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
341 lockdep_assert_held(&ent->lock);
345 target = ent->limit * 2;
346 if (target == ent->available_mrs + ent->pending)
348 if (target > ent->available_mrs + ent->pending) {
349 u32 todo = target - (ent->available_mrs + ent->pending);
351 spin_unlock_irq(&ent->lock);
352 err = add_keys(ent, todo);
354 usleep_range(3000, 5000);
355 spin_lock_irq(&ent->lock);
362 remove_cache_mr_locked(ent);
367 static ssize_t size_write(struct file *filp, const char __user *buf,
368 size_t count, loff_t *pos)
370 struct mlx5_cache_ent *ent = filp->private_data;
374 err = kstrtou32_from_user(buf, count, 0, &target);
379 * Target is the new value of total_mrs the user requests, however we
380 * cannot free MRs that are in use. Compute the target value for
383 spin_lock_irq(&ent->lock);
384 if (target < ent->total_mrs - ent->available_mrs) {
388 target = target - (ent->total_mrs - ent->available_mrs);
389 if (target < ent->limit || target > ent->limit*2) {
393 err = resize_available_mrs(ent, target, false);
396 spin_unlock_irq(&ent->lock);
401 spin_unlock_irq(&ent->lock);
405 static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
408 struct mlx5_cache_ent *ent = filp->private_data;
412 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
416 return simple_read_from_buffer(buf, count, pos, lbuf, err);
419 static const struct file_operations size_fops = {
420 .owner = THIS_MODULE,
426 static ssize_t limit_write(struct file *filp, const char __user *buf,
427 size_t count, loff_t *pos)
429 struct mlx5_cache_ent *ent = filp->private_data;
433 err = kstrtou32_from_user(buf, count, 0, &var);
438 * Upon set we immediately fill the cache to high water mark implied by
441 spin_lock_irq(&ent->lock);
443 err = resize_available_mrs(ent, 0, true);
444 spin_unlock_irq(&ent->lock);
450 static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
453 struct mlx5_cache_ent *ent = filp->private_data;
457 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
461 return simple_read_from_buffer(buf, count, pos, lbuf, err);
464 static const struct file_operations limit_fops = {
465 .owner = THIS_MODULE,
467 .write = limit_write,
471 static bool someone_adding(struct mlx5_mr_cache *cache)
475 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
476 struct mlx5_cache_ent *ent = &cache->ent[i];
479 spin_lock_irq(&ent->lock);
480 ret = ent->available_mrs < ent->limit;
481 spin_unlock_irq(&ent->lock);
/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis: once the low water mark is hit it
 * is refilled up to the high water mark.
 */
493 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
495 lockdep_assert_held(&ent->lock);
497 if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
499 if (ent->available_mrs < ent->limit) {
500 ent->fill_to_high_water = true;
501 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
502 } else if (ent->fill_to_high_water &&
503 ent->available_mrs + ent->pending < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
508 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
509 } else if (ent->available_mrs == 2 * ent->limit) {
510 ent->fill_to_high_water = false;
511 } else if (ent->available_mrs > 2 * ent->limit) {
512 /* Queue deletion of excess entries */
513 ent->fill_to_high_water = false;
515 queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
516 msecs_to_jiffies(1000));
518 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
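/*
 * Illustrative example of the hysteresis above (numbers are hypothetical):
 * with ent->limit == 500, dropping below 500 available MRs sets
 * fill_to_high_water and queues the worker, which keeps adding MRs until
 * available + pending reaches 1000 (2 * limit). Only when the pool grows
 * beyond 1000 is a delayed shrink queued, one MR per pass, after a 1s delay,
 * and the worker may postpone it further when the system is busy.
 */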
522 static void __cache_work_func(struct mlx5_cache_ent *ent)
524 struct mlx5_ib_dev *dev = ent->dev;
525 struct mlx5_mr_cache *cache = &dev->cache;
528 spin_lock_irq(&ent->lock);
532 if (ent->fill_to_high_water &&
533 ent->available_mrs + ent->pending < 2 * ent->limit &&
534 !READ_ONCE(dev->fill_delay)) {
535 spin_unlock_irq(&ent->lock);
536 err = add_keys(ent, 1);
537 spin_lock_irq(&ent->lock);
542 * EAGAIN only happens if pending is positive, so we
543 * will be rescheduled from reg_mr_callback(). The only
544 * failure path here is ENOMEM.
546 if (err != -EAGAIN) {
549 "command failed order %d, err %d\n",
551 queue_delayed_work(cache->wq, &ent->dwork,
552 msecs_to_jiffies(1000));
555 } else if (ent->available_mrs > 2 * ent->limit) {
		/*
		 * The remove_cache_mr() logic is performed as a garbage
		 * collection task. Such a task is intended to be run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_cache_mr() and
		 * postpone the garbage collection work to try to run in the
		 * next cycle, in order to free CPU resources to other tasks.
		 */
570 spin_unlock_irq(&ent->lock);
571 need_delay = need_resched() || someone_adding(cache) ||
573 READ_ONCE(cache->last_add) + 300 * HZ);
574 spin_lock_irq(&ent->lock);
578 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
581 remove_cache_mr_locked(ent);
582 queue_adjust_cache_locked(ent);
585 spin_unlock_irq(&ent->lock);
588 static void delayed_cache_work_func(struct work_struct *work)
590 struct mlx5_cache_ent *ent;
592 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
593 __cache_work_func(ent);
596 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
597 struct mlx5_cache_ent *ent,
600 struct mlx5_ib_mr *mr;
602 /* Matches access in alloc_cache_mr() */
603 if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
604 return ERR_PTR(-EOPNOTSUPP);
606 spin_lock_irq(&ent->lock);
607 if (list_empty(&ent->head)) {
608 queue_adjust_cache_locked(ent);
610 spin_unlock_irq(&ent->lock);
611 mr = create_cache_mr(ent);
615 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
617 ent->available_mrs--;
618 queue_adjust_cache_locked(ent);
619 spin_unlock_irq(&ent->lock);
626 static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
628 struct mlx5_cache_ent *ent = mr->cache_ent;
630 WRITE_ONCE(dev->cache.last_add, jiffies);
631 spin_lock_irq(&ent->lock);
632 list_add_tail(&mr->list, &ent->head);
633 ent->available_mrs++;
634 queue_adjust_cache_locked(ent);
635 spin_unlock_irq(&ent->lock);
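/*
 * An MR returned here keeps its mkey and simply goes back on the entry's free
 * list; mkeys are only destroyed when the cache shrinks
 * (remove_cache_mr_locked()) or is torn down (clean_keys()).
 */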
638 static void clean_keys(struct mlx5_ib_dev *dev, int c)
640 struct mlx5_mr_cache *cache = &dev->cache;
641 struct mlx5_cache_ent *ent = &cache->ent[c];
642 struct mlx5_ib_mr *tmp_mr;
643 struct mlx5_ib_mr *mr;
646 cancel_delayed_work(&ent->dwork);
648 spin_lock_irq(&ent->lock);
649 if (list_empty(&ent->head)) {
650 spin_unlock_irq(&ent->lock);
653 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
654 list_move(&mr->list, &del_list);
655 ent->available_mrs--;
657 spin_unlock_irq(&ent->lock);
658 mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
661 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
667 static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
669 if (!mlx5_debugfs_root || dev->is_rep)
672 debugfs_remove_recursive(dev->cache.root);
673 dev->cache.root = NULL;
676 static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
678 struct mlx5_mr_cache *cache = &dev->cache;
679 struct mlx5_cache_ent *ent;
683 if (!mlx5_debugfs_root || dev->is_rep)
686 cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
688 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
689 ent = &cache->ent[i];
690 sprintf(ent->name, "%d", ent->order);
691 dir = debugfs_create_dir(ent->name, cache->root);
692 debugfs_create_file("size", 0600, dir, ent, &size_fops);
693 debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
694 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
695 debugfs_create_u32("miss", 0600, dir, &ent->miss);
699 static void delay_time_func(struct timer_list *t)
701 struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
703 WRITE_ONCE(dev->fill_delay, 0);
706 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
708 struct mlx5_mr_cache *cache = &dev->cache;
709 struct mlx5_cache_ent *ent;
712 mutex_init(&dev->slow_path_mutex);
713 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
715 mlx5_ib_warn(dev, "failed to create work queue\n");
719 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
720 timer_setup(&dev->delay_timer, delay_time_func, 0);
721 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
722 ent = &cache->ent[i];
723 INIT_LIST_HEAD(&ent->head);
724 spin_lock_init(&ent->lock);
729 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
731 if (i > MR_CACHE_LAST_STD_ENTRY) {
732 mlx5_odp_init_mr_cache_entry(ent);
736 if (ent->order > mr_cache_max_order(dev))
739 ent->page = PAGE_SHIFT;
740 ent->ndescs = 1 << ent->order;
741 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
742 if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
743 !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
744 mlx5_ib_can_load_pas_with_umr(dev, 0))
745 ent->limit = dev->mdev->profile.mr_cache[i].limit;
748 spin_lock_irq(&ent->lock);
749 queue_adjust_cache_locked(ent);
750 spin_unlock_irq(&ent->lock);
753 mlx5_mr_cache_debugfs_init(dev);
758 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
765 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
766 struct mlx5_cache_ent *ent = &dev->cache.ent[i];
768 spin_lock_irq(&ent->lock);
769 ent->disabled = true;
770 spin_unlock_irq(&ent->lock);
771 cancel_delayed_work_sync(&ent->dwork);
774 mlx5_mr_cache_debugfs_cleanup(dev);
775 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
777 for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
780 destroy_workqueue(dev->cache.wq);
781 del_timer_sync(&dev->delay_timer);
786 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
788 struct mlx5_ib_dev *dev = to_mdev(pd->device);
789 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
790 struct mlx5_ib_mr *mr;
795 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
797 return ERR_PTR(-ENOMEM);
799 in = kzalloc(inlen, GFP_KERNEL);
805 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
807 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
808 MLX5_SET(mkc, mkc, length64, 1);
809 set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
812 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
817 mr->mmkey.type = MLX5_MKEY_MR;
818 mr->ibmr.lkey = mr->mmkey.key;
819 mr->ibmr.rkey = mr->mmkey.key;
833 static int get_octo_len(u64 addr, u64 len, int page_shift)
835 u64 page_size = 1ULL << page_shift;
839 offset = addr & (page_size - 1);
840 npages = ALIGN(len + offset, page_size) >> page_shift;
841 return (npages + 1) / 2;
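/*
 * Worked example (hypothetical values): addr = 0x1080, len = 0x3000,
 * page_shift = 12 gives offset = 0x80 and npages = ALIGN(0x3080, 0x1000) >> 12
 * = 4, so get_octo_len() returns (4 + 1) / 2 = 2 octwords; each 16-byte
 * octword holds two 8-byte MTT entries.
 */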
844 static int mr_cache_max_order(struct mlx5_ib_dev *dev)
846 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
847 return MR_CACHE_LAST_STD_ENTRY + 2;
848 return MLX5_MAX_UMR_SHIFT;
851 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
853 struct mlx5_ib_umr_context *context =
854 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
856 context->status = wc->status;
857 complete(&context->done);
860 static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
862 context->cqe.done = mlx5_ib_umr_done;
863 context->status = -1;
864 init_completion(&context->done);
867 static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
868 struct mlx5_umr_wr *umrwr)
870 struct umr_common *umrc = &dev->umrc;
871 const struct ib_send_wr *bad;
873 struct mlx5_ib_umr_context umr_context;
875 mlx5_ib_init_umr_context(&umr_context);
876 umrwr->wr.wr_cqe = &umr_context.cqe;
879 err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
881 mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
883 wait_for_completion(&umr_context.done);
884 if (umr_context.status != IB_WC_SUCCESS) {
885 mlx5_ib_warn(dev, "reg umr failed (%u)\n",
894 static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
897 struct mlx5_mr_cache *cache = &dev->cache;
899 if (order < cache->ent[0].order)
900 return &cache->ent[0];
901 order = order - cache->ent[0].order;
902 if (order > MR_CACHE_LAST_STD_ENTRY)
904 return &cache->ent[order];
907 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
908 u64 length, int access_flags, u64 iova)
910 mr->ibmr.lkey = mr->mmkey.key;
911 mr->ibmr.rkey = mr->mmkey.key;
912 mr->ibmr.length = length;
913 mr->ibmr.device = &dev->ib_dev;
914 mr->ibmr.iova = iova;
915 mr->access_flags = access_flags;
918 static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
922 * The alignment of iova has already been checked upon entering
923 * UVERBS_METHOD_REG_DMABUF_MR
929 static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
930 struct ib_umem *umem, u64 iova,
933 struct mlx5_ib_dev *dev = to_mdev(pd->device);
934 struct mlx5_cache_ent *ent;
935 struct mlx5_ib_mr *mr;
936 unsigned int page_size;
939 page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
941 page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
943 if (WARN_ON(!page_size))
944 return ERR_PTR(-EINVAL);
945 ent = mr_cache_ent_from_order(
946 dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
	/*
	 * Matches access in alloc_cache_mr(). If the MR can't come from the
	 * cache then synchronously create an uncached one.
	 */
951 if (!ent || ent->limit == 0 ||
952 !mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags)) {
953 mutex_lock(&dev->slow_path_mutex);
954 mr = reg_create(pd, umem, iova, access_flags, page_size, false);
955 mutex_unlock(&dev->slow_path_mutex);
959 mr = mlx5_mr_cache_alloc(dev, ent, access_flags);
965 mr->page_shift = order_base_2(page_size);
966 set_mr_fields(dev, mr, umem->length, access_flags, iova);
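/*
 * Summary of the path above: registrations that fit a cache bucket and can be
 * reconfigured via UMR take a pre-created mkey from the cache; everything
 * else falls back to reg_create() under slow_path_mutex, which issues a
 * synchronous CREATE_MKEY command instead.
 */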
971 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
972 MLX5_UMR_MTT_ALIGNMENT)
973 #define MLX5_SPARE_UMR_CHUNK 0x10000
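/*
 * mlx5_ib_alloc_xlt() below walks down a size ladder: first a chunk of up to
 * MLX5_MAX_UMR_CHUNK (about 1MB, per the comment below), then the 64KB
 * MLX5_SPARE_UMR_CHUNK, then a single page, and finally the statically
 * allocated xlt_emergency_page, so the allocation can never fail outright.
 */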
/*
 * Allocate a temporary buffer to hold the per-page information to transfer to
 * HW. For efficiency this should be as large as it can be, but buffer
 * allocation failure is not allowed, so try smaller sizes.
 */
980 static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
982 const size_t xlt_chunk_align =
983 MLX5_UMR_MTT_ALIGNMENT / ent_size;
987 static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
	/*
	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that
	 * the allocation can't trigger any kind of reclaim.
	 */
995 gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
	/*
	 * If the system already has a suitable high order page then just use
	 * that, but don't try hard to create one. This max is about 1M, so a
	 * free x86 huge page will satisfy it.
	 */
1002 size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
1003 MLX5_MAX_UMR_CHUNK);
1004 *nents = size / ent_size;
1005 res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
1010 if (size > MLX5_SPARE_UMR_CHUNK) {
1011 size = MLX5_SPARE_UMR_CHUNK;
1012 *nents = size / ent_size;
1013 res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
1019 *nents = PAGE_SIZE / ent_size;
1020 res = (void *)__get_free_page(gfp_mask);
1024 mutex_lock(&xlt_emergency_page_mutex);
1025 memset(xlt_emergency_page, 0, PAGE_SIZE);
1026 return xlt_emergency_page;
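/*
 * Callers pair mlx5_ib_alloc_xlt() with mlx5_ib_free_xlt(); when the
 * emergency page was handed out, xlt_emergency_page_mutex stays held until
 * that free call releases it.
 */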
1029 static void mlx5_ib_free_xlt(void *xlt, size_t length)
1031 if (xlt == xlt_emergency_page) {
1032 mutex_unlock(&xlt_emergency_page_mutex);
1036 free_pages((unsigned long)xlt, get_order(length));
1040 * Create a MLX5_IB_SEND_UMR_UPDATE_XLT work request and XLT buffer ready for
1043 static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
1044 struct mlx5_umr_wr *wr, struct ib_sge *sg,
1045 size_t nents, size_t ent_size,
1048 struct mlx5_ib_dev *dev = mr_to_mdev(mr);
1049 struct device *ddev = &dev->mdev->pdev->dev;
1053 xlt = mlx5_ib_alloc_xlt(&nents, ent_size,
1054 flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
1056 sg->length = nents * ent_size;
1057 dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
1058 if (dma_mapping_error(ddev, dma)) {
1059 mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
1060 mlx5_ib_free_xlt(xlt, sg->length);
1064 sg->lkey = dev->umrc.pd->local_dma_lkey;
1066 memset(wr, 0, sizeof(*wr));
1067 wr->wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
1068 if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
1069 wr->wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
1070 wr->wr.sg_list = sg;
1072 wr->wr.opcode = MLX5_IB_WR_UMR;
1073 wr->pd = mr->ibmr.pd;
1074 wr->mkey = mr->mmkey.key;
1075 wr->length = mr->ibmr.length;
1076 wr->virt_addr = mr->ibmr.iova;
1077 wr->access_flags = mr->access_flags;
1078 wr->page_shift = mr->page_shift;
1079 wr->xlt_size = sg->length;
1083 static void mlx5_ib_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
1086 struct device *ddev = &dev->mdev->pdev->dev;
1088 dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
1089 mlx5_ib_free_xlt(xlt, sg->length);
1092 static unsigned int xlt_wr_final_send_flags(unsigned int flags)
1094 unsigned int res = 0;
1096 if (flags & MLX5_IB_UPD_XLT_ENABLE)
1097 res |= MLX5_IB_SEND_UMR_ENABLE_MR |
1098 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
1099 MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1100 if (flags & MLX5_IB_UPD_XLT_PD || flags & MLX5_IB_UPD_XLT_ACCESS)
1101 res |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1102 if (flags & MLX5_IB_UPD_XLT_ADDR)
1103 res |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
1107 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
1108 int page_shift, int flags)
1110 struct mlx5_ib_dev *dev = mr_to_mdev(mr);
1111 struct device *ddev = &dev->mdev->pdev->dev;
1113 struct mlx5_umr_wr wr;
1116 int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
1117 ? sizeof(struct mlx5_klm)
1118 : sizeof(struct mlx5_mtt);
1119 const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
1120 const int page_mask = page_align - 1;
1121 size_t pages_mapped = 0;
1122 size_t pages_to_map = 0;
1124 size_t size_to_map = 0;
1125 size_t orig_sg_length;
1127 if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
1128 !umr_can_use_indirect_mkey(dev))
1131 if (WARN_ON(!mr->umem->is_odp))
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
1137 if (idx & page_mask) {
1138 npages += idx & page_mask;
1141 pages_to_map = ALIGN(npages, page_align);
1143 xlt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, npages, desc_size, flags);
1146 pages_iter = sg.length / desc_size;
1147 orig_sg_length = sg.length;
1149 if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
1150 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
1151 size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
1153 pages_to_map = min_t(size_t, pages_to_map, max_pages);
1156 wr.page_shift = page_shift;
1158 for (pages_mapped = 0;
1159 pages_mapped < pages_to_map && !err;
1160 pages_mapped += pages_iter, idx += pages_iter) {
1161 npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
1162 size_to_map = npages * desc_size;
1163 dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
1165 mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
1166 dma_sync_single_for_device(ddev, sg.addr, sg.length,
1169 sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
1171 if (pages_mapped + pages_iter >= pages_to_map)
1172 wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
1174 wr.offset = idx * desc_size;
1175 wr.xlt_size = sg.length;
1177 err = mlx5_ib_post_send_wait(dev, &wr);
1179 sg.length = orig_sg_length;
1180 mlx5_ib_unmap_free_xlt(dev, xlt, &sg);
1185 * Send the DMA list to the HW for a normal MR using UMR.
1186 * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
1189 int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
1191 struct mlx5_ib_dev *dev = mr_to_mdev(mr);
1192 struct device *ddev = &dev->mdev->pdev->dev;
1193 struct ib_block_iter biter;
1194 struct mlx5_mtt *cur_mtt;
1195 struct mlx5_umr_wr wr;
1196 size_t orig_sg_length;
1197 struct mlx5_mtt *mtt;
1202 if (WARN_ON(mr->umem->is_odp))
1205 mtt = mlx5_ib_create_xlt_wr(mr, &wr, &sg,
1206 ib_umem_num_dma_blocks(mr->umem,
1207 1 << mr->page_shift),
1208 sizeof(*mtt), flags);
1211 orig_sg_length = sg.length;
1214 rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,
1215 mr->umem->sgt_append.sgt.nents,
1216 BIT(mr->page_shift)) {
1217 if (cur_mtt == (void *)mtt + sg.length) {
1218 dma_sync_single_for_device(ddev, sg.addr, sg.length,
1220 err = mlx5_ib_post_send_wait(dev, &wr);
1223 dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
1225 wr.offset += sg.length;
1230 cpu_to_be64(rdma_block_iter_dma_address(&biter) |
1231 MLX5_IB_MTT_PRESENT);
1233 if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
1239 final_size = (void *)cur_mtt - (void *)mtt;
1240 sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
1241 memset(cur_mtt, 0, sg.length - final_size);
1242 wr.wr.send_flags |= xlt_wr_final_send_flags(flags);
1243 wr.xlt_size = sg.length;
1245 dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
1246 err = mlx5_ib_post_send_wait(dev, &wr);
1249 sg.length = orig_sg_length;
1250 mlx5_ib_unmap_free_xlt(dev, mtt, &sg);
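/*
 * For example, create_real_mr() calls mlx5_ib_update_mr_pas() with
 * MLX5_IB_UPD_XLT_ENABLE to push the full DMA list and enable the mkey, while
 * the dma-buf move_notify callback passes MLX5_IB_UPD_XLT_ZAP to invalidate
 * the translation before the pages are unmapped.
 */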
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
1258 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
1259 u64 iova, int access_flags,
1260 unsigned int page_size, bool populate)
1262 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1263 struct mlx5_ib_mr *mr;
1269 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
1272 return ERR_PTR(-EINVAL);
1273 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1275 return ERR_PTR(-ENOMEM);
1278 mr->access_flags = access_flags;
1279 mr->page_shift = order_base_2(page_size);
1281 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1283 inlen += sizeof(*pas) *
1284 roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
1285 in = kvzalloc(inlen, GFP_KERNEL);
1290 pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
1292 if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
1296 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
1297 pg_cap ? MLX5_IB_MTT_PRESENT : 0);
1300 /* The pg_access bit allows setting the access flags
1301 * in the page list submitted with the command. */
1302 MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
1304 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1305 set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
1306 populate ? pd : dev->umrc.pd);
1307 MLX5_SET(mkc, mkc, free, !populate);
1308 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
1309 MLX5_SET(mkc, mkc, umr_en, 1);
1311 MLX5_SET64(mkc, mkc, len, umem->length);
1312 MLX5_SET(mkc, mkc, bsf_octword_size, 0);
1313 MLX5_SET(mkc, mkc, translations_octword_size,
1314 get_octo_len(iova, umem->length, mr->page_shift));
1315 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
1317 MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
1318 get_octo_len(iova, umem->length, mr->page_shift));
1321 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1323 mlx5_ib_warn(dev, "create mkey failed\n");
1326 mr->mmkey.type = MLX5_MKEY_MR;
1328 set_mr_fields(dev, mr, umem->length, access_flags, iova);
1331 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
1339 return ERR_PTR(err);
1342 static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
1343 u64 length, int acc, int mode)
1345 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1346 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
1347 struct mlx5_ib_mr *mr;
1352 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1354 return ERR_PTR(-ENOMEM);
1356 in = kzalloc(inlen, GFP_KERNEL);
1362 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1364 MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
1365 MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
1366 MLX5_SET64(mkc, mkc, len, length);
1367 set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
1369 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1375 set_mr_fields(dev, mr, length, acc, start_addr);
1385 return ERR_PTR(err);
1388 int mlx5_ib_advise_mr(struct ib_pd *pd,
1389 enum ib_uverbs_advise_mr_advice advice,
1391 struct ib_sge *sg_list,
1393 struct uverbs_attr_bundle *attrs)
1395 if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
1396 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
1397 advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
1400 return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
1404 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
1405 struct ib_dm_mr_attr *attr,
1406 struct uverbs_attr_bundle *attrs)
1408 struct mlx5_ib_dm *mdm = to_mdm(dm);
1409 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
1410 u64 start_addr = mdm->dev_addr + attr->offset;
1413 switch (mdm->type) {
1414 case MLX5_IB_UAPI_DM_TYPE_MEMIC:
1415 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
1416 return ERR_PTR(-EINVAL);
1418 mode = MLX5_MKC_ACCESS_MODE_MEMIC;
1419 start_addr -= pci_resource_start(dev->pdev, 0);
1421 case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
1422 case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
1423 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
1424 return ERR_PTR(-EINVAL);
1426 mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
1429 return ERR_PTR(-EINVAL);
1432 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
1433 attr->access_flags, mode);
1436 static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
1437 u64 iova, int access_flags)
1439 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1440 struct mlx5_ib_mr *mr = NULL;
1444 xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, umem->length);
1446 mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
1448 unsigned int page_size = mlx5_umem_find_best_pgsz(
1449 umem, mkc, log_page_size, 0, iova);
1451 mutex_lock(&dev->slow_path_mutex);
1452 mr = reg_create(pd, umem, iova, access_flags, page_size, true);
1453 mutex_unlock(&dev->slow_path_mutex);
1456 ib_umem_release(umem);
1457 return ERR_CAST(mr);
1460 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1462 atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
1466 * If the MR was created with reg_create then it will be
1467 * configured properly but left disabled. It is safe to go ahead
1468 * and configure it again via UMR while enabling it.
1470 err = mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
1472 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1473 return ERR_PTR(err);
1479 static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
1480 u64 iova, int access_flags,
1481 struct ib_udata *udata)
1483 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1484 struct ib_umem_odp *odp;
1485 struct mlx5_ib_mr *mr;
1488 if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1489 return ERR_PTR(-EOPNOTSUPP);
1491 err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
1493 return ERR_PTR(err);
1494 if (!start && length == U64_MAX) {
1496 return ERR_PTR(-EINVAL);
1497 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1498 return ERR_PTR(-EINVAL);
1500 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
1502 return ERR_CAST(mr);
1506 /* ODP requires xlt update via umr to work. */
1507 if (!mlx5_ib_can_load_pas_with_umr(dev, length))
1508 return ERR_PTR(-EINVAL);
1510 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
1513 return ERR_CAST(odp);
1515 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
1517 ib_umem_release(&odp->umem);
1518 return ERR_CAST(mr);
1520 xa_init(&mr->implicit_children);
1523 err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
1527 err = mlx5_ib_init_odp_mr(mr);
1533 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1534 return ERR_PTR(err);
1537 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1538 u64 iova, int access_flags,
1539 struct ib_udata *udata)
1541 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1542 struct ib_umem *umem;
1544 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
1545 return ERR_PTR(-EOPNOTSUPP);
1547 mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
1548 start, iova, length, access_flags);
1550 if (access_flags & IB_ACCESS_ON_DEMAND)
1551 return create_user_odp_mr(pd, start, length, iova, access_flags,
1553 umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
1555 return ERR_CAST(umem);
1556 return create_real_mr(pd, umem, iova, access_flags);
1559 static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
1561 struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
1562 struct mlx5_ib_mr *mr = umem_dmabuf->private;
1564 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
1566 if (!umem_dmabuf->sgt)
1569 mlx5_ib_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
1570 ib_umem_dmabuf_unmap_pages(umem_dmabuf);
1573 static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
1574 .allow_peer2peer = 1,
1575 .move_notify = mlx5_ib_dmabuf_invalidate_cb,
1578 struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
1579 u64 length, u64 virt_addr,
1580 int fd, int access_flags,
1581 struct ib_udata *udata)
1583 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1584 struct mlx5_ib_mr *mr = NULL;
1585 struct ib_umem_dmabuf *umem_dmabuf;
1588 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
1589 !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1590 return ERR_PTR(-EOPNOTSUPP);
1593 "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
1594 offset, virt_addr, length, fd, access_flags);
1596 /* dmabuf requires xlt update via umr to work. */
1597 if (!mlx5_ib_can_load_pas_with_umr(dev, length))
1598 return ERR_PTR(-EINVAL);
1600 umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
1602 &mlx5_ib_dmabuf_attach_ops);
1603 if (IS_ERR(umem_dmabuf)) {
1604 mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
1605 PTR_ERR(umem_dmabuf));
1606 return ERR_CAST(umem_dmabuf);
1609 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
1612 ib_umem_release(&umem_dmabuf->umem);
1613 return ERR_CAST(mr);
1616 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1618 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
1619 umem_dmabuf->private = mr;
1620 err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
1624 err = mlx5_ib_init_dmabuf_mr(mr);
1630 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1631 return ERR_PTR(err);
/**
 * revoke_mr - Fence all DMA on the MR
 * @mr: The MR to fence
 *
 * Upon return the NIC will not be doing any DMA to the pages under the MR,
 * and any DMA in progress will be completed. Failure of this function
 * indicates the HW has failed catastrophically.
 */
1642 static int revoke_mr(struct mlx5_ib_mr *mr)
1644 struct mlx5_umr_wr umrwr = {};
1646 if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1649 umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
1650 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
1651 umrwr.wr.opcode = MLX5_IB_WR_UMR;
1652 umrwr.pd = mr_to_mdev(mr)->umrc.pd;
1653 umrwr.mkey = mr->mmkey.key;
1654 umrwr.ignore_free_state = 1;
1656 return mlx5_ib_post_send_wait(mr_to_mdev(mr), &umrwr);
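/*
 * A revoke_mr() failure is treated as catastrophic by mlx5_ib_dereg_mr()
 * below: the MR is dropped from the cache accounting (cache_ent set to NULL)
 * and its mkey is destroyed instead of being returned to the pool.
 */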
/*
 * True if the change in access flags can be done via UMR; only some access
 * flags can be updated.
 */
1663 static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
1664 unsigned int current_access_flags,
1665 unsigned int target_access_flags)
1667 unsigned int diffs = current_access_flags ^ target_access_flags;
1669 if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
1670 IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
1672 return mlx5_ib_can_reconfig_with_umr(dev, current_access_flags,
1673 target_access_flags);
1676 static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
1679 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1680 struct mlx5_umr_wr umrwr = {
1682 .send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
1683 MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS,
1684 .opcode = MLX5_IB_WR_UMR,
1686 .mkey = mr->mmkey.key,
1688 .access_flags = access_flags,
1692 err = mlx5_ib_post_send_wait(dev, &umrwr);
1696 mr->access_flags = access_flags;
1700 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
1701 struct ib_umem *new_umem,
1702 int new_access_flags, u64 iova,
1703 unsigned long *page_size)
1705 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1707 /* We only track the allocated sizes of MRs from the cache */
1710 if (!mlx5_ib_can_load_pas_with_umr(dev, new_umem->length))
1714 mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
1715 if (WARN_ON(!*page_size))
1717 return (1ULL << mr->cache_ent->order) >=
1718 ib_umem_num_dma_blocks(new_umem, *page_size);
1721 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
1722 int access_flags, int flags, struct ib_umem *new_umem,
1723 u64 iova, unsigned long page_size)
1725 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1726 int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
1727 struct ib_umem *old_umem = mr->umem;
	/*
	 * To keep everything simple the MR is revoked before we start to mess
	 * with it. This ensures the change is atomic relative to any use of
	 * the MR.
	 */
1735 err = revoke_mr(mr);
1739 if (flags & IB_MR_REREG_PD) {
1741 upd_flags |= MLX5_IB_UPD_XLT_PD;
1743 if (flags & IB_MR_REREG_ACCESS) {
1744 mr->access_flags = access_flags;
1745 upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
1748 mr->ibmr.length = new_umem->length;
1749 mr->ibmr.iova = iova;
1750 mr->ibmr.length = new_umem->length;
1751 mr->page_shift = order_base_2(page_size);
1752 mr->umem = new_umem;
1753 err = mlx5_ib_update_mr_pas(mr, upd_flags);
1756 * The MR is revoked at this point so there is no issue to free
1759 mr->umem = old_umem;
1763 atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
1764 ib_umem_release(old_umem);
1765 atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
1769 struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1770 u64 length, u64 iova, int new_access_flags,
1771 struct ib_pd *new_pd,
1772 struct ib_udata *udata)
1774 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
1775 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1778 if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
1779 return ERR_PTR(-EOPNOTSUPP);
1783 "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
1784 start, iova, length, new_access_flags);
1786 if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
1787 return ERR_PTR(-EOPNOTSUPP);
1789 if (!(flags & IB_MR_REREG_ACCESS))
1790 new_access_flags = mr->access_flags;
1791 if (!(flags & IB_MR_REREG_PD))
1794 if (!(flags & IB_MR_REREG_TRANS)) {
1795 struct ib_umem *umem;
1797 /* Fast path for PD/access change */
1798 if (can_use_umr_rereg_access(dev, mr->access_flags,
1799 new_access_flags)) {
1800 err = umr_rereg_pd_access(mr, new_pd, new_access_flags);
1802 return ERR_PTR(err);
		/* DM or ODP MRs don't have a normal umem so we can't re-use it */
1806 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
		/*
		 * Only one active MR can refer to a umem at one time, revoke
		 * the old MR before assigning the umem to the new one.
		 */
1813 err = revoke_mr(mr);
1815 return ERR_PTR(err);
1818 atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
1820 return create_real_mr(new_pd, umem, mr->ibmr.iova,
	/*
	 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does
	 * but the logic around releasing the umem is different.
	 */
1828 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1831 if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
1832 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
1833 struct ib_umem *new_umem;
1834 unsigned long page_size;
1836 new_umem = ib_umem_get(&dev->ib_dev, start, length,
1838 if (IS_ERR(new_umem))
1839 return ERR_CAST(new_umem);
1841 /* Fast path for PAS change */
1842 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
1844 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
1845 new_umem, iova, page_size);
1847 ib_umem_release(new_umem);
1848 return ERR_PTR(err);
1852 return create_real_mr(new_pd, new_umem, iova, new_access_flags);
1856 * Everything else has no state we can preserve, just create a new MR
1860 return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
1861 new_access_flags, udata);
1865 mlx5_alloc_priv_descs(struct ib_device *device,
1866 struct mlx5_ib_mr *mr,
1870 struct mlx5_ib_dev *dev = to_mdev(device);
1871 struct device *ddev = &dev->mdev->pdev->dev;
1872 int size = ndescs * desc_size;
1876 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
1878 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1879 if (!mr->descs_alloc)
1882 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1884 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
1885 if (dma_mapping_error(ddev, mr->desc_map)) {
1892 kfree(mr->descs_alloc);
1898 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1900 if (!mr->umem && mr->descs) {
1901 struct ib_device *device = mr->ibmr.device;
1902 int size = mr->max_descs * mr->desc_size;
1903 struct mlx5_ib_dev *dev = to_mdev(device);
1905 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
1907 kfree(mr->descs_alloc);
1912 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1914 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1915 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	/*
	 * Any async use of the mr must hold the refcount, once the refcount
	 * goes to zero no other thread, such as ODP page faults, prefetch, any
	 * UMR activity, etc can touch the mkey. Thus it is safe to destroy it.
	 */
1923 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
1924 refcount_read(&mr->mmkey.usecount) != 0 &&
1925 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
1926 mlx5r_deref_wait_odp_mkey(&mr->mmkey);
1928 if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
1929 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
1930 mr->sig, NULL, GFP_KERNEL);
1933 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
1939 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
1945 if (mlx5_core_destroy_psv(dev->mdev,
1946 mr->sig->psv_memory.psv_idx))
1947 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
1948 mr->sig->psv_memory.psv_idx);
1949 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1950 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
1951 mr->sig->psv_wire.psv_idx);
1957 if (mr->cache_ent) {
1958 if (revoke_mr(mr)) {
1959 spin_lock_irq(&mr->cache_ent->lock);
1960 mr->cache_ent->total_mrs--;
1961 spin_unlock_irq(&mr->cache_ent->lock);
1962 mr->cache_ent = NULL;
1965 if (!mr->cache_ent) {
1966 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
1972 bool is_odp = is_odp_mr(mr);
1975 atomic_sub(ib_umem_num_pages(mr->umem),
1976 &dev->mdev->priv.reg_pages);
1977 ib_umem_release(mr->umem);
1979 mlx5_ib_free_odp_mr(mr);
1982 if (mr->cache_ent) {
1983 mlx5_mr_cache_free(dev, mr);
1985 mlx5_free_priv_descs(mr);
1991 static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
1992 int access_mode, int page_shift)
1996 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1998 /* This is only used from the kernel, so setting the PD is OK. */
1999 set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
2000 MLX5_SET(mkc, mkc, free, 1);
2001 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
2002 MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
2003 MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
2004 MLX5_SET(mkc, mkc, umr_en, 1);
2005 MLX5_SET(mkc, mkc, log_page_size, page_shift);
2008 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2009 int ndescs, int desc_size, int page_shift,
2010 int access_mode, u32 *in, int inlen)
2012 struct mlx5_ib_dev *dev = to_mdev(pd->device);
2015 mr->access_mode = access_mode;
2016 mr->desc_size = desc_size;
2017 mr->max_descs = ndescs;
2019 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
2023 mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
2025 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
2027 goto err_free_descs;
2029 mr->mmkey.type = MLX5_MKEY_MR;
2030 mr->ibmr.lkey = mr->mmkey.key;
2031 mr->ibmr.rkey = mr->mmkey.key;
2036 mlx5_free_priv_descs(mr);
2040 static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
2041 u32 max_num_sg, u32 max_num_meta_sg,
2042 int desc_size, int access_mode)
2044 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2045 int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
2047 struct mlx5_ib_mr *mr;
2051 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2053 return ERR_PTR(-ENOMEM);
2056 mr->ibmr.device = pd->device;
2058 in = kzalloc(inlen, GFP_KERNEL);
2064 if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
2065 page_shift = PAGE_SHIFT;
2067 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
2068 access_mode, in, inlen);
2081 return ERR_PTR(err);
2084 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2085 int ndescs, u32 *in, int inlen)
2087 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
2088 PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
2092 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2093 int ndescs, u32 *in, int inlen)
2095 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
2096 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
2099 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2100 int max_num_sg, int max_num_meta_sg,
2103 struct mlx5_ib_dev *dev = to_mdev(pd->device);
2108 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
2112 /* create mem & wire PSVs */
2113 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
2117 mr->sig->psv_memory.psv_idx = psv_index[0];
2118 mr->sig->psv_wire.psv_idx = psv_index[1];
2120 mr->sig->sig_status_checked = true;
2121 mr->sig->sig_err_exists = false;
2122 /* Next UMR, Arm SIGERR */
2123 ++mr->sig->sigerr_count;
2124 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
2125 sizeof(struct mlx5_klm),
2126 MLX5_MKC_ACCESS_MODE_KLMS);
2127 if (IS_ERR(mr->klm_mr)) {
2128 err = PTR_ERR(mr->klm_mr);
2129 goto err_destroy_psv;
2131 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
2132 sizeof(struct mlx5_mtt),
2133 MLX5_MKC_ACCESS_MODE_MTT);
2134 if (IS_ERR(mr->mtt_mr)) {
2135 err = PTR_ERR(mr->mtt_mr);
2136 goto err_free_klm_mr;
2139 /* Set bsf descriptors for mkey */
2140 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2141 MLX5_SET(mkc, mkc, bsf_en, 1);
2142 MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
2144 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
2145 MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
2147 goto err_free_mtt_mr;
2149 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
2150 mr->sig, GFP_KERNEL));
2152 goto err_free_descs;
2156 destroy_mkey(dev, mr);
2157 mlx5_free_priv_descs(mr);
2159 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
2162 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
2165 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
2166 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
2167 mr->sig->psv_memory.psv_idx);
2168 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
2169 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
2170 mr->sig->psv_wire.psv_idx);
2177 static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
2178 enum ib_mr_type mr_type, u32 max_num_sg,
2179 u32 max_num_meta_sg)
2181 struct mlx5_ib_dev *dev = to_mdev(pd->device);
2182 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2183 int ndescs = ALIGN(max_num_sg, 4);
2184 struct mlx5_ib_mr *mr;
2188 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2190 return ERR_PTR(-ENOMEM);
2192 in = kzalloc(inlen, GFP_KERNEL);
2198 mr->ibmr.device = pd->device;
2202 case IB_MR_TYPE_MEM_REG:
2203 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
2205 case IB_MR_TYPE_SG_GAPS:
2206 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
2208 case IB_MR_TYPE_INTEGRITY:
2209 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
2210 max_num_meta_sg, in, inlen);
2213 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
2228 return ERR_PTR(err);
2231 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2234 return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
2237 struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
2238 u32 max_num_sg, u32 max_num_meta_sg)
2240 return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
2244 int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2246 struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
2247 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
2248 struct mlx5_ib_mw *mw = to_mmw(ibmw);
2249 unsigned int ndescs;
2253 struct mlx5_ib_alloc_mw req = {};
2256 __u32 response_length;
2259 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
2263 if (req.comp_mask || req.reserved1 || req.reserved2)
2266 if (udata->inlen > sizeof(req) &&
2267 !ib_is_udata_cleared(udata, sizeof(req),
2268 udata->inlen - sizeof(req)))
2271 ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
2273 in = kzalloc(inlen, GFP_KERNEL);
2279 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
2281 MLX5_SET(mkc, mkc, free, 1);
2282 MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
2283 MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
2284 MLX5_SET(mkc, mkc, umr_en, 1);
2285 MLX5_SET(mkc, mkc, lr, 1);
2286 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
2287 MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
2288 MLX5_SET(mkc, mkc, qpn, 0xffffff);
2290 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
2294 mw->mmkey.type = MLX5_MKEY_MW;
2295 ibmw->rkey = mw->mmkey.key;
2296 mw->mmkey.ndescs = ndescs;
2298 resp.response_length =
2299 min(offsetofend(typeof(resp), response_length), udata->outlen);
2300 if (resp.response_length) {
2301 err = ib_copy_to_udata(udata, &resp, resp.response_length);
2306 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
2307 err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
2316 mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
2322 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
2324 struct mlx5_ib_dev *dev = to_mdev(mw->device);
2325 struct mlx5_ib_mw *mmw = to_mmw(mw);
2327 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
2328 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
2330 * pagefault_single_data_segment() may be accessing mmw
2331 * if the user bound an ODP MR to this MW.
2333 mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
2335 return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
2338 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
2339 struct ib_mr_status *mr_status)
2341 struct mlx5_ib_mr *mmr = to_mmr(ibmr);
2344 if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
2345 pr_err("Invalid status check mask\n");
2350 mr_status->fail_status = 0;
2351 if (check_mask & IB_MR_CHECK_SIG_STATUS) {
2354 pr_err("signature status check requested on a non-signature enabled MR\n");
2358 mmr->sig->sig_status_checked = true;
2359 if (!mmr->sig->sig_err_exists)
2362 if (ibmr->lkey == mmr->sig->err_item.key)
2363 memcpy(&mr_status->sig_err, &mmr->sig->err_item,
2364 sizeof(mr_status->sig_err));
2366 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
2367 mr_status->sig_err.sig_err_offset = 0;
2368 mr_status->sig_err.key = mmr->sig->err_item.key;
2371 mmr->sig->sig_err_exists = false;
2372 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
2380 mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2381 int data_sg_nents, unsigned int *data_sg_offset,
2382 struct scatterlist *meta_sg, int meta_sg_nents,
2383 unsigned int *meta_sg_offset)
2385 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2386 unsigned int sg_offset = 0;
2389 mr->meta_length = 0;
2390 if (data_sg_nents == 1) {
2392 mr->mmkey.ndescs = 1;
2394 sg_offset = *data_sg_offset;
2395 mr->data_length = sg_dma_len(data_sg) - sg_offset;
2396 mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2397 if (meta_sg_nents == 1) {
2399 mr->meta_ndescs = 1;
2401 sg_offset = *meta_sg_offset;
2404 mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2405 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2407 ibmr->length = mr->data_length + mr->meta_length;
2414 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2415 struct scatterlist *sgl,
2416 unsigned short sg_nents,
2417 unsigned int *sg_offset_p,
2418 struct scatterlist *meta_sgl,
2419 unsigned short meta_sg_nents,
2420 unsigned int *meta_sg_offset_p)
2422 struct scatterlist *sg = sgl;
2423 struct mlx5_klm *klms = mr->descs;
2424 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
2425 u32 lkey = mr->ibmr.pd->local_dma_lkey;
2428 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
2429 mr->ibmr.length = 0;
2431 for_each_sg(sgl, sg, sg_nents, i) {
2432 if (unlikely(i >= mr->max_descs))
2434 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
2435 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
2436 klms[i].key = cpu_to_be32(lkey);
2437 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2443 *sg_offset_p = sg_offset;
2445 mr->mmkey.ndescs = i;
2446 mr->data_length = mr->ibmr.length;
2448 if (meta_sg_nents) {
2450 sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
2451 for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
2452 if (unlikely(i + j >= mr->max_descs))
2454 klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
2456 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
2458 klms[i + j].key = cpu_to_be32(lkey);
2459 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2463 if (meta_sg_offset_p)
2464 *meta_sg_offset_p = sg_offset;
2466 mr->meta_ndescs = j;
2467 mr->meta_length = mr->ibmr.length - mr->data_length;
2473 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
2475 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2478 if (unlikely(mr->mmkey.ndescs == mr->max_descs))
2482 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2487 static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
2489 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2492 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
2496 descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
2497 cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2503 mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2504 int data_sg_nents, unsigned int *data_sg_offset,
2505 struct scatterlist *meta_sg, int meta_sg_nents,
2506 unsigned int *meta_sg_offset)
2508 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2509 struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2512 pi_mr->mmkey.ndescs = 0;
2513 pi_mr->meta_ndescs = 0;
2514 pi_mr->meta_length = 0;
2516 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2517 pi_mr->desc_size * pi_mr->max_descs,
2520 pi_mr->ibmr.page_size = ibmr->page_size;
2521 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
2523 if (n != data_sg_nents)
2526 pi_mr->data_iova = pi_mr->ibmr.iova;
2527 pi_mr->data_length = pi_mr->ibmr.length;
2528 pi_mr->ibmr.length = pi_mr->data_length;
2529 ibmr->length = pi_mr->data_length;
2531 if (meta_sg_nents) {
2532 u64 page_mask = ~((u64)ibmr->page_size - 1);
2533 u64 iova = pi_mr->data_iova;
2535 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
2536 meta_sg_offset, mlx5_set_page_pi);
2538 pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * PI address for the HW is the offset of the metadata address
		 * relative to the first data page address.
		 * It equals the first data page address + size of data pages +
		 * metadata offset at the first metadata page.
		 */
2545 pi_mr->pi_iova = (iova & page_mask) +
2546 pi_mr->mmkey.ndescs * ibmr->page_size +
2547 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we register
		 * also the gaps between the end of the data and the start of
		 * the metadata (the sig MR will verify that the HW will access
		 * the right addresses). This mapping is safe because we use an
		 * internal mkey for the registration.
		 */
2555 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
2556 pi_mr->ibmr.iova = iova;
2557 ibmr->length += pi_mr->meta_length;
2560 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2561 pi_mr->desc_size * pi_mr->max_descs,
2568 mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2569 int data_sg_nents, unsigned int *data_sg_offset,
2570 struct scatterlist *meta_sg, int meta_sg_nents,
2571 unsigned int *meta_sg_offset)
2573 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2574 struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2577 pi_mr->mmkey.ndescs = 0;
2578 pi_mr->meta_ndescs = 0;
2579 pi_mr->meta_length = 0;
2581 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
2582 pi_mr->desc_size * pi_mr->max_descs,
2585 n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
2586 meta_sg, meta_sg_nents, meta_sg_offset);
2588 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
2589 pi_mr->desc_size * pi_mr->max_descs,
2592 /* This is zero-based memory region */
2593 pi_mr->data_iova = 0;
2594 pi_mr->ibmr.iova = 0;
2595 pi_mr->pi_iova = pi_mr->data_length;
2596 ibmr->length = pi_mr->ibmr.length;
2601 int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
2602 int data_sg_nents, unsigned int *data_sg_offset,
2603 struct scatterlist *meta_sg, int meta_sg_nents,
2604 unsigned int *meta_sg_offset)
2606 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2607 struct mlx5_ib_mr *pi_mr = NULL;
2610 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
2612 mr->mmkey.ndescs = 0;
2613 mr->data_length = 0;
2615 mr->meta_ndescs = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform a UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fall back to UMR only in case of a failure.
	 */
2623 n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2624 data_sg_offset, meta_sg, meta_sg_nents,
2626 if (n == data_sg_nents + meta_sg_nents)
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to
	 * MTT descriptors and fall back to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially in high load).
	 * Use KLM (indirect access) only if it's mandatory.
	 */
2637 n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2638 data_sg_offset, meta_sg, meta_sg_nents,
2640 if (n == data_sg_nents + meta_sg_nents)
2644 n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
2645 data_sg_offset, meta_sg, meta_sg_nents,
2647 if (unlikely(n != data_sg_nents + meta_sg_nents))
2651 /* This is zero-based memory region */
2655 ibmr->sig_attrs->meta_length = pi_mr->meta_length;
2657 ibmr->sig_attrs->meta_length = mr->meta_length;
2662 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
2663 unsigned int *sg_offset)
2665 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2668 mr->mmkey.ndescs = 0;
2670 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2671 mr->desc_size * mr->max_descs,
2674 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2675 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2678 n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
2681 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2682 mr->desc_size * mr->max_descs,