/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>
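
/* An exported dma-buf stashes the backing GEM object in its priv field. */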
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}
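
/*
 * Build an independent mapping for the attached device: copy the object's
 * backing sg_table and DMA-map the copy, keeping the pages pinned for the
 * lifetime of the mapping.
 */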
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
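
/* Undo i915_gem_map_dma_buf: unmap and free the sg_table copy, unpin. */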
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}
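
/*
 * Map the whole object into kernel address space. The mapping is cached
 * and refcounted, so subsequent vmaps of the same buffer are cheap.
 */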
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
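
/* Drop one vmap reference; the last vunmap tears the mapping down. */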
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}
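
/* Per-page kmap access is not implemented; importers must vmap or mmap. */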
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}
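
/* Hand the mmap off to the object's shmem backing file. */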
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}
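
/* Prepare importer CPU access by moving the object to the CPU domain. */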
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
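
/*
 * Finish CPU access by flushing back to the GTT domain. This hook returns
 * void here, so the domain change is done non-interruptibly and a failure
 * can only be reported with an error message.
 */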
static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool was_interruptible;
	int ret;

	mutex_lock(&dev->struct_mutex);
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	dev_priv->mm.interruptible = was_interruptible;
	mutex_unlock(&dev->struct_mutex);

	if (unlikely(ret))
		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
}
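
/* dma-buf operations for every buffer exported by i915. */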
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};
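
/*
 * Export a GEM object as a dma-buf, giving object types that implement a
 * dmabuf_export hook the chance to refuse or prepare for the export.
 */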
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}
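
/*
 * For imported objects the backing pages are supplied by the exporter
 * through the dma-buf attachment instead of shmem.
 */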
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};
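
/*
 * Import a dma-buf as a GEM object. A buffer we exported ourselves is
 * short-circuited: the existing GEM object is referenced instead of
 * wrapping the dma-buf in a new one.
 */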
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}