/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
 * Copyright (c) 2017 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
11 int hns_roce_db_map_user(struct hns_roce_ucontext *context,
12 struct ib_udata *udata, unsigned long virt,
13 struct hns_roce_db *db)
15 struct hns_roce_user_db_page *page;
18 mutex_lock(&context->page_mutex);
20 list_for_each_entry(page, &context->page_list, list)
21 if (page->user_virt == (virt & PAGE_MASK))
24 page = kmalloc(sizeof(*page), GFP_KERNEL);
30 refcount_set(&page->refcount, 1);
31 page->user_virt = (virt & PAGE_MASK);
32 page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
33 if (IS_ERR(page->umem)) {
34 ret = PTR_ERR(page->umem);
39 list_add(&page->list, &context->page_list);
42 db->dma = sg_dma_address(page->umem->sg_head.sgl) +
44 page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
45 db->virt_addr = sg_virt(page->umem->sg_head.sgl);
46 db->u.user_page = page;
47 refcount_inc(&page->refcount);
50 mutex_unlock(&context->page_mutex);
54 EXPORT_SYMBOL(hns_roce_db_map_user);
56 void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
57 struct hns_roce_db *db)
59 mutex_lock(&context->page_mutex);
61 refcount_dec(&db->u.user_page->refcount);
62 if (refcount_dec_if_one(&db->u.user_page->refcount)) {
63 list_del(&db->u.user_page->list);
64 ib_umem_release(db->u.user_page->umem);
65 kfree(db->u.user_page);
68 mutex_unlock(&context->page_mutex);
70 EXPORT_SYMBOL(hns_roce_db_unmap_user);
72 static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
73 struct device *dma_device)
75 struct hns_roce_db_pgdir *pgdir;
77 pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
81 bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2);
82 pgdir->bits[0] = pgdir->order0;
83 pgdir->bits[1] = pgdir->order1;
84 pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
85 &pgdir->db_dma, GFP_KERNEL);
94 static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
95 struct hns_roce_db *db, int order)
100 for (o = order; o <= 1; ++o) {
101 i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
102 if (i < HNS_ROCE_DB_PER_PAGE >> o)
109 clear_bit(i, pgdir->bits[o]);
114 set_bit(i ^ 1, pgdir->bits[order]);
118 db->db_record = pgdir->page + db->index;
119 db->dma = pgdir->db_dma + db->index * 4;
125 int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
128 struct hns_roce_db_pgdir *pgdir;
131 mutex_lock(&hr_dev->pgdir_mutex);
133 list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
134 if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
137 pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
143 list_add(&pgdir->list, &hr_dev->pgdir_list);
145 /* This should never fail -- we just allocated an empty page: */
146 WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));
149 mutex_unlock(&hr_dev->pgdir_mutex);
153 EXPORT_SYMBOL_GPL(hns_roce_alloc_db);
155 void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
160 mutex_lock(&hr_dev->pgdir_mutex);
165 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
166 clear_bit(i ^ 1, db->u.pgdir->order0);
171 set_bit(i, db->u.pgdir->bits[o]);
173 if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
174 dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
175 db->u.pgdir->db_dma);
176 list_del(&db->u.pgdir->list);
180 mutex_unlock(&hr_dev->pgdir_mutex);
182 EXPORT_SYMBOL_GPL(hns_roce_free_db);