// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>

#include "uverbs.h"

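/*
 * ib_umem_dmabuf_map_pages() - map the dma-buf pages into the umem sg list.
 *
 * The caller must hold the dma-buf reservation lock. With a dynamic
 * attachment the exporter may move the buffer once move_notify has been
 * called, so the mapping is only guaranteed to stay valid while the lock
 * is held.
 */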
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct dma_fence *fence;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	/* an existing mapping is reused; only wait for the fence below */
	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
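	/*
	 * The exporter maps the whole dma-buf even when the umem covers
	 * only a sub-range. Count the entries that overlap [start, end),
	 * and trim the head of the first overlapping entry and the tail
	 * of the last one; the trimmed amounts are saved so that
	 * ib_umem_dmabuf_unmap_pages() can restore the original list.
	 */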
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sg_head.nents = nmap;
	umem_dmabuf->umem.nmap = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up-to-date. Wait for the exporter to finish
	 * the migration.
	 */
	fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
	if (fence)
		return dma_fence_wait(fence, false);

	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

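/*
 * A minimal usage sketch, not part of this file: a driver page-fault
 * path (names here are hypothetical) would map the pages under the
 * reservation lock and program its hardware before unlocking:
 *
 *	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
 *	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
 *	if (!err)
 *		err = my_driver_program_mtt(mr);	// hypothetical helper
 *	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
 */
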
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

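/*
 * Sketch of an importer's move_notify callback (hypothetical driver
 * code, not part of this file). The exporter calls move_notify with
 * the reservation lock held, so the pages can be unmapped directly
 * once the device mapping has been invalidated:
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
 *
 *		my_driver_invalidate_mr(umem_dmabuf);	// hypothetical helper
 *		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
 *	}
 */
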
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	/* a dynamic attachment is unusable without invalidation support */
	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;	/* offset into the dma-buf */
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

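/*
 * Sketch of a caller, e.g. a driver's reg_user_mr_dmabuf verb handler
 * (hypothetical names, not part of this file). The move_notify callback
 * is wired up through the attach ops and the fd comes from userspace:
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	umem_dmabuf = ib_umem_dmabuf_get(ibdev, offset, length, fd,
 *					 access_flags, &my_attach_ops);
 */
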
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}