/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
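/*
 * The MR cache keeps a pool of pre-created mkeys, grouped into entries
 * by order (log2 of the number of pages an mkey can map). Registrations
 * that fit a cached order are served from the pool and later returned
 * to it, avoiding a synchronous create_mkey command on the hot path.
 */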
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
}

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static bool use_umr(struct mlx5_ib_dev *dev, int order)
{
	return order <= mr_cache_max_order(dev) &&
		umr_can_modify_entity_size(dev);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		/* Wait until all page fault handlers using the mr complete. */
		synchronize_srcu(&dev->mr_srcu);

	return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (is_odp_mr(mr)) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before we start to
		 * handle invalidations.
		 */
		smp_wmb();
		to_ib_umem_odp(mr->umem)->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
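/*
 * Completion handler for the asynchronous create_mkey commands issued
 * by add_keys(). On success the fresh MR is appended to its cache
 * entry and made reachable through the mkey radix tree; on failure
 * cache refilling is throttled via delay_timer.
 */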
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}
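/*
 * Asynchronously create up to 'num' mkeys for cache entry 'c'. At most
 * MAX_PENDING_REG_MR commands may be outstanding per entry; once that
 * threshold is hit we bail out with -EAGAIN and the caller is expected
 * to retry later.
 */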
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       &dev->async_ctx, in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
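/*
 * Destroy up to 'num' cached mkeys from entry 'c', oldest first. The
 * mkeys are unlinked under the entry lock, destroyed outside of it,
 * and freed only after an SRCU grace period, so that ODP page-fault
 * handlers still using them have finished.
 */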
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		synchronize_srcu(&dev->mr_srcu);

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}
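/*
 * Background worker policy: keep each entry between limit and
 * 2 * limit mkeys. Below 2 * limit the entry is refilled one mkey at a
 * time; above 2 * limit it is shrunk, but only when the system is idle
 * and nothing was added to the cache in the last 300 seconds.
 */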
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to be run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in the next
		 * cycle, in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
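/*
 * Allocate an MR from an explicitly chosen cache entry. Unlike
 * alloc_cached_mr(), this variant does not fall back to neighbouring
 * entries; if the list is empty it triggers a refill and sleeps on the
 * entry's completion until an mkey shows up.
 */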
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return ERR_PTR(-EINVAL);
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	if (!mr->allocated_from_cache)
		return;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}

	if (unreg_umr(dev, mr))
		return;

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->cur);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}
static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		queue_work(cache->wq, &ent->work);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}
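/*
 * Pin the user address range and derive its layout: the total number
 * of system pages (npages), the best page size the HW can use
 * (page_shift), the page count at that size (ncont) and the allocation
 * order used for the cache lookup. A zero-length region is rejected.
 */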
static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
		       u64 start, u64 length, int access_flags,
		       struct ib_umem **umem, int *npages, int *page_shift,
		       int *ncont, int *order)
{
	struct ib_umem *u;
	int err;

	*umem = NULL;

	u = ib_umem_get(udata, start, length, access_flags, 0);
	err = PTR_ERR_OR_ZERO(u);
	if (err) {
		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
		return err;
	}

	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
			   page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}
static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
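/*
 * Push an updated translation table (MTTs or KLMs) to the device with
 * UMR work requests. The descriptors are staged in a DMA-mapped bounce
 * buffer and posted in chunks of at most MLX5_MAX_UMR_CHUNK bytes; if
 * even the spare-chunk allocation fails, a global emergency page is
 * used so the update can still make progress under memory pressure.
 */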
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}
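/*
 * Slow path of registration: build the full create_mkey command,
 * optionally inlining the page list (populate == true). Used when the
 * cache cannot serve the request or when UMR is not usable.
 */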
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate)
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}
static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	set_mr_fields(dev, mr, 0, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool populate_mtts = false;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
	    length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
			  &npages, &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (use_umr(dev, order)) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
		populate_mtts = false;
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		populate_mtts = true;
	}

	if (!mr) {
		if (!umr_can_modify_entity_size(dev))
			populate_mtts = true;
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, populate_mtts);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

	update_odp_mr(mr);

	if (!populate_mtts) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);

		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		mr->live = 1;
		atomic_set(&mr->num_pending_prefetch, 0);
	}

	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
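/*
 * Release a cache-allocated mkey by posting a UMR that clears its
 * translation state and flags it as free, so it can be handed out
 * again. Skipped when the device is in internal error, since no WQE
 * would complete in that state.
 */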
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(dev, udata, addr, len, access_flags,
				  &mr->umem, &npages, &page_shift, &ncont,
				  &order);
		if (err)
			goto err;
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			mr->live = 1;
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

	update_odp_mr(mr);
	return 0;

err:
	ib_umem_release(mr->umem);
	mr->umem = NULL;

	clean_mr(dev, mr);
	return err;
}
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!allocated_from_cache)
		destroy_mkey(dev, mr);
}
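/*
 * Tear-down order matters here: for ODP MRs all page faults and
 * prefetches are stopped and the umem is destroyed first, so no
 * invalidation can race with freeing *mr; only then is the mkey
 * destroyed or returned to the cache.
 */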
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

	if (is_odp_mr(mr)) {
		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);

		/* Prevent new page faults and
		 * prefetch requests from succeeding
		 */
		mr->live = 0;

		/* dequeue pending prefetch requests for the mr */
		if (atomic_read(&mr->num_pending_prefetch))
			flush_workqueue(system_unbound_wq);
		WARN_ON(atomic_read(&mr->num_pending_prefetch));

		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (umem_odp->page_list)
			mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
						 ib_umem_end(umem));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}

	clean_mr(dev, mr);

	/*
	 * We should unregister the DMA address from the HCA before
	 * we remove the DMA mapping.
	 */
	mlx5_mr_cache_free(dev, mr);
	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}
	if (!mr->allocated_from_cache)
		kfree(mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
	return 0;
}
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_mtt));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(struct mlx5_mtt);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SG_GAPS) {
		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(struct mlx5_klm));
		if (err)
			goto err_free_in;
		mr->desc_size = sizeof(struct mlx5_klm);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		MLX5_SET(mkc, mkc, bsf_en, 1);
		MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);

	mr->ibmr.device = pd->device;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_destroy_psv;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (!err)
		kfree(mmw);
	return err;
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}
	mr->ndescs = i;

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	return i;
}
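/*
 * Page-list callback used by ib_sg_to_pages(): records one page
 * address into the MTT descriptor array, with the local read/write
 * permission bits set.
 */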
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}