// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS(DMA_BUF);
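
/*
 * Map the dma-buf attachment and clip the returned sg list to the umem's
 * [address, address + length) range, then wait for the exporter's fences
 * so the page contents are valid before the device uses them.
 */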
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        struct sg_table *sgt;
        struct scatterlist *sg;
        unsigned long start, end, cur = 0;
        unsigned int nmap = 0;
        int i;

        dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

        if (umem_dmabuf->sgt)
                goto wait_fence;

        sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* modify the sg list in-place to match umem address and length */

        start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
        end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
                    PAGE_SIZE);
        for_each_sgtable_dma_sg(sgt, sg, i) {
                if (start < cur + sg_dma_len(sg) && cur < end)
                        nmap++;
                if (cur <= start && start < cur + sg_dma_len(sg)) {
                        unsigned long offset = start - cur;

                        umem_dmabuf->first_sg = sg;
                        umem_dmabuf->first_sg_offset = offset;
                        sg_dma_address(sg) += offset;
                        sg_dma_len(sg) -= offset;
                        cur += offset;
                }
                if (cur < end && end <= cur + sg_dma_len(sg)) {
                        unsigned long trim = cur + sg_dma_len(sg) - end;

                        umem_dmabuf->last_sg = sg;
                        umem_dmabuf->last_sg_trim = trim;
                        sg_dma_len(sg) -= trim;
                        break;
                }
                cur += sg_dma_len(sg);
        }

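        /*
         * Publish only the clipped entries through the umem's sg table so
         * callers iterating the umem see just the requested range.
         */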
        umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
        umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
        umem_dmabuf->sgt = sgt;

wait_fence:
        /*
         * Although the sg list is valid now, the content of the pages
         * may not be up-to-date. Wait for the exporter to finish
         * the migration.
         */
        return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
                                     DMA_RESV_USAGE_KERNEL,
                                     false, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
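
/*
 * Undo the clipping done by ib_umem_dmabuf_map_pages() so the exporter
 * gets its original sg list back, then unmap the attachment.
 */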
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
        dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

        if (!umem_dmabuf->sgt)
                return;

        /* restore the original sg list */
        if (umem_dmabuf->first_sg) {
                sg_dma_address(umem_dmabuf->first_sg) -=
                        umem_dmabuf->first_sg_offset;
                sg_dma_len(umem_dmabuf->first_sg) +=
                        umem_dmabuf->first_sg_offset;
                umem_dmabuf->first_sg = NULL;
                umem_dmabuf->first_sg_offset = 0;
        }
        if (umem_dmabuf->last_sg) {
                sg_dma_len(umem_dmabuf->last_sg) +=
                        umem_dmabuf->last_sg_trim;
                umem_dmabuf->last_sg = NULL;
                umem_dmabuf->last_sg_trim = 0;
        }

        dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
                                 DMA_BIDIRECTIONAL);

        umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);
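
/*
 * Create an unpinned dma-buf umem: the importer must supply a move_notify
 * callback so the mapping can be rebuilt when the exporter moves the buffer.
 */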
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
                                          unsigned long offset, size_t size,
                                          int fd, int access,
                                          const struct dma_buf_attach_ops *ops)
{
        struct dma_buf *dmabuf;
        struct ib_umem_dmabuf *umem_dmabuf;
        struct ib_umem *umem;
        unsigned long end;
        struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

        if (check_add_overflow(offset, (unsigned long)size, &end))
                return ret;

        if (unlikely(!ops || !ops->move_notify))
                return ret;

        dmabuf = dma_buf_get(fd);
        if (IS_ERR(dmabuf))
                return ERR_CAST(dmabuf);

        if (dmabuf->size < end)
                goto out_release_dmabuf;

        umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
        if (!umem_dmabuf) {
                ret = ERR_PTR(-ENOMEM);
                goto out_release_dmabuf;
        }

        umem = &umem_dmabuf->umem;
        umem->ibdev = device;
        umem->length = size;
        umem->address = offset;
        umem->writable = ib_access_writable(access);
        umem->is_dmabuf = 1;

        if (!ib_umem_num_pages(umem))
                goto out_free_umem;

        umem_dmabuf->attach = dma_buf_dynamic_attach(dmabuf,
                                                     device->dma_device,
                                                     ops, umem_dmabuf);
        if (IS_ERR(umem_dmabuf->attach)) {
                ret = ERR_CAST(umem_dmabuf->attach);
                goto out_free_umem;
        }
        return umem_dmabuf;

out_free_umem:
        kfree(umem_dmabuf);

out_release_dmabuf:
        dma_buf_put(dmabuf);
        return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
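
/*
 * move_notify handler used for pinned attachments; pinning means the buffer
 * cannot move, so a call here indicates a misbehaving exporter.
 */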
static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
        struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

        ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
                               "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
        .allow_peer2peer = true,
        .move_notify = ib_umem_dmabuf_unsupported_move_notify,
};
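
/*
 * Pinned variant of ib_umem_dmabuf_get(): the buffer is pinned and mapped up
 * front, so no move_notify handling is required of the caller.
 */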
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
                                                 unsigned long offset,
                                                 size_t size, int fd,
                                                 int access)
{
        struct ib_umem_dmabuf *umem_dmabuf;
        int err;

        umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
                                         &ib_umem_dmabuf_attach_pinned_ops);
        if (IS_ERR(umem_dmabuf))
                return umem_dmabuf;

        dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
        err = dma_buf_pin(umem_dmabuf->attach);
        if (err)
                goto err_release;
        umem_dmabuf->pinned = 1;

        err = ib_umem_dmabuf_map_pages(umem_dmabuf);
        if (err)
                goto err_unpin;
        dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

        return umem_dmabuf;

err_unpin:
        dma_buf_unpin(umem_dmabuf->attach);
err_release:
        dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
        ib_umem_release(&umem_dmabuf->umem);
        return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
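
/*
 * Release everything acquired by ib_umem_dmabuf_get()/_get_pinned():
 * unmap, unpin if pinned, detach, drop the dma-buf reference and free
 * the umem.
 */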
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
        struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

        dma_resv_lock(dmabuf->resv, NULL);
        ib_umem_dmabuf_unmap_pages(umem_dmabuf);
        if (umem_dmabuf->pinned)
                dma_buf_unpin(umem_dmabuf->attach);
        dma_resv_unlock(dmabuf->resv);

        dma_buf_detach(dmabuf, umem_dmabuf->attach);
        dma_buf_put(dmabuf);

        kfree(umem_dmabuf);
}