/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
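
/*
 * Helpers that probe HCA capability bits: UMR may be used for a given
 * operation only when the firmware has not disabled it.
 */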
static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
	       umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}
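
/*
 * Map an allocation order to an index into the MR cache entry array;
 * ent[0] holds the smallest supported order.
 */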
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}
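
/*
 * A UMR-based translation update may reuse the existing mkey only if the
 * new range still fits within the translation space the MR was created
 * with (2^order adapter pages), including the offset into the first page.
 */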
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, to ensure that MR
		 * initialization has finished before we start to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif
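
/*
 * Completion callback for the asynchronous mkey creation posted by
 * add_keys(): assign the new key, add the MR to its cache entry list and
 * insert it into the device's mkey radix tree.
 */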
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}
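
/*
 * Post 'num' asynchronous mkey-creation commands for cache entry 'c'.
 * Completions arrive in reg_mr_callback(); at most MAX_PENDING_REG_MR
 * commands may be outstanding per entry at any time.
 */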
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, mr);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
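
/*
 * Pop up to 'num' MRs off cache entry 'c' and destroy their mkeys,
 * letting any ODP page-fault handlers drain before freeing the MRs.
 */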
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	/* Copy no more than the user supplied, leaving room for the NUL. */
	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	/* Copy no more than the user supplied, leaving room for the NUL. */
	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task, intended to run when no other active
		 * processes are running.
		 *
		 * need_resched() returns true if there are user tasks
		 * to be activated in the near future.
		 *
		 * In that case we don't execute remove_keys() and postpone
		 * the garbage collection work to the next cycle, in order
		 * to free CPU resources for other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
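
/*
 * Take an MR from a specific cache entry, replenishing the entry first
 * if it is empty. Used, e.g., by the ODP code to grab pre-created mkeys;
 * a sketch of a typical call (entry constant assumed from mlx5_ib.h):
 *
 *	mr = mlx5_mr_cache_alloc(dev, MLX5_IMR_MTT_CACHE_ENTRY);
 */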
struct ib_mr *mlx5_mr_cache_alloc() is exported below.
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return ERR_PTR(-EINVAL);
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}

static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root || dev->rep)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			goto err;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fsize || !ent->flimit || !ent->fcur || !ent->fmiss)
			goto err;
	}

	return 0;

err:
	mlx5_mr_cache_debugfs_cleanup(dev);

	return -ENOMEM;
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int err;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	/*
	 * We don't want to fail the driver if debugfs failed to
	 * initialize, so we do not forward the error to the user.
	 */

	return 0;
}

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int total = 0;
	int i;
	int j;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {
			if (!ent->pending)
				break;
			msleep(50);
		}
	}
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;
	}

	if (total)
		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
	else
		mlx5_ib_warn(dev, "done with all pending requests\n");
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
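
/*
 * Number of 16-byte octowords needed to hold one 8-byte MTT per page of
 * the region; e.g. a page-aligned 1 MiB region with page_shift = 12
 * covers 256 pages and therefore needs (256 + 1) / 2 = 128 octowords.
 */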
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}

static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem,
		       int *npages, int *page_shift, int *ncont,
		       int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *u;
	int err;

	*umem = NULL;

	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
	err = PTR_ERR_OR_ZERO(u);
	if (err) {
		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}

static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}
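
/*
 * Post a UMR work request on the dedicated UMR QP and block until its
 * completion fires mlx5_ib_umr_done(); the umr_common semaphore bounds
 * the number of concurrently outstanding UMR operations.
 */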
static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}

static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
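
/*
 * Update a window of an MR's translation table via UMR. The XLT entries
 * are staged in a DMA-mapped buffer: ideally one allocation of up to
 * MLX5_MAX_UMR_CHUNK bytes, falling back to a smaller spare chunk and,
 * as a last resort, to a single pre-allocated emergency page; the update
 * is then posted in buffer-sized chunks.
 */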
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly.
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. Falling back to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command.
	 */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}
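
/*
 * Create an MR over device memory (MEMIC): the mkey uses the MEMIC access
 * mode, and its start address is expressed as an offset from the beginning
 * of BAR 0, where the device exposes that memory.
 */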
static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
					  u64 length, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MEMIC & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2,
		 (MLX5_MKC_ACCESS_MODE_MEMIC >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr,
		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	mr->umem = NULL;
	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	u64 memic_addr;

	if (attr->access_flags & ~MLX5_IB_DM_ALLOWED_ACCESS)
		return ERR_PTR(-EINVAL);

	memic_addr = mdm->dev_addr + attr->offset;

	return mlx5_ib_get_memic_mr(pd, memic_addr, attr->length,
				    attr->access_flags);
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool populate_mtts = false;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (!start && length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}
#endif

	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
			  &page_shift, &ncont, &order);
	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(dev, order)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
		populate_mtts = false;
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		populate_mtts = true;
	}

	if (!mr) {
		if (!umr_can_modify_entity_size(dev))
			populate_mtts = true;
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, populate_mtts);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	if (!populate_mtts) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	mr->live = 1;
#endif
	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err)
			goto err;
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);
		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		mr->live = 1;
#endif
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}
		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif
	return 0;

err:
	if (mr->umem) {
		ib_umem_release(mr->umem);
		mr->umem = NULL;
	}
	clean_mr(dev, mr);
	return err;
}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache)
		destroy_mkey(dev, mr);
}

static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem->odp_data->page_list)
			mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(dev, mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	if (!mr->allocated_from_cache)
		kfree(mr);
	else
		mlx5_mr_cache_free(dev, mr);
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
	return 0;
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (err)
		return err;

	kfree(mmw);
	return 0;
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}
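
/*
 * Page-list callback handed to ib_sg_to_pages() from mlx5_ib_map_mr_sg():
 * appends one page address, tagged readable and writable, to the MR's
 * descriptor array.
 */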
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}